/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid alone is not good enough.
Prior to version 2.4, Linux can either wait for an event in the main thread,
or in secondary threads, but not both in one call.  (2.4 has the __WALL
flag.)  So, if we use blocking waitpid, we might miss an event.  The solution
is to use non-blocking waitpid, together with sigsuspend.  First, we use
non-blocking waitpid to get an event in the main process, if any.  Second, we
use non-blocking waitpid with the __WCLONE flag to check for events in cloned
processes.  If nothing is found, we use sigsuspend to wait for SIGCHLD.  When
SIGCHLD arrives, it means something happened to a child process -- and
SIGCHLD will be delivered both for events in the main debugged process and in
cloned processes.  As soon as we know there's an event, we get back to
calling non-blocking waitpid with and without __WCLONE.

Note that SIGCHLD should be blocked between the waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, while it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, that's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it wastes time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it cannot be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we
can't use them, because they have special behavior when the signal is
generated, not when it is delivered.  SIGCONT resumes the entire thread group
and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread
we tkill'd.  But we never let the SIGSTOP be delivered; we always intercept
and cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still need some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
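
/* Illustrative sketch only, not code used by this file: the sync-mode
   polling described above boils down to checking for an event with
   non-blocking waitpid twice, once without and once with __WCLONE, and
   falling back to sigsuspend when neither call reports anything.  The
   helper name below is hypothetical.  */
#if 0
static int
example_poll_for_event (int *statusp)
{
  int pid;

  /* First check the main (non-cloned) children...  */
  pid = my_waitpid (-1, statusp, WNOHANG);
  if (pid <= 0)
    /* ... then, separately, check cloned LWPs.  */
    pid = my_waitpid (-1, statusp, __WCLONE | WNOHANG);

  /* > 0 means some LWP reported an event; otherwise the caller blocks
     in sigsuspend waiting for SIGCHLD and tries again.  */
  return pid;
}
#endif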

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

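/* Illustrative sketch only, not the handler this file installs (the
   real one lives elsewhere in this file): in async mode the SIGCHLD
   handler's whole job is to wake up the event loop through the pipe
   above, using nothing but async-signal-safe calls.  */
#if 0
static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;	/* read/write may clobber errno.  */

  if (signo == SIGCHLD && linux_is_async_p ())
    async_file_mark ();		/* Wake up the event loop's select/poll.  */

  errno = old_errno;
}
#endif
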
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static int check_stopped_by_breakpoint (struct lwp_info *lp);
static int sigtrap_is_event (int status);
static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* Initialize ptrace warnings and check for supported ptrace
   features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace (pid_t pid, int attached)
{
  linux_enable_event_reporting (pid, attached);
  linux_ptrace_init_warnings ();
}

static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}

static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

/* Target hook for follow_fork.  On entry inferior_ptid must be the
   ptid of the followed inferior.  At return, inferior_ptid will be
   unchanged.  */

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
			 int detach_fork)
{
  if (!follow_child)
    {
      struct lwp_info *child_lp = NULL;
      int status = W_STOPCODE (0);
      struct cleanup *old_chain;
      int has_vforked;
      ptid_t parent_ptid, child_ptid;
      int parent_pid, child_pid;

      has_vforked = (inferior_thread ()->pending_follow.kind
		     == TARGET_WAITKIND_VFORKED);
      parent_ptid = inferior_ptid;
      child_ptid = inferior_thread ()->pending_follow.value.related_pid;
      parent_pid = ptid_get_lwp (parent_ptid);
      child_pid = ptid_get_lwp (child_ptid);

      /* We're already attached to the parent, by default.  */
      old_chain = save_inferior_ptid ();
      inferior_ptid = child_ptid;
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (child_lp->ptid)))
	    {
	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	    }

	  if (WIFSTOPPED (status))
	    {
	      int signo;

	      signo = WSTOPSIG (status);
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);
	    }

	  /* Resets value of inferior_ptid to parent ptid.  */
	  do_cleanups (old_chain);
	}
      else
	{
	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();
	}

      do_cleanups (old_chain);

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (parent_ptid);
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypass actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_is_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
				    int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore the child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (struct target_ops *self,
			int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

\f

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (ptid_lwp_p (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  if (linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (ptid_lwp_p (ptid))
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == ptid_get_lwp (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (ptid_get_pid (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LLAL: attach failed, but already seen "
					"this thread %s stop\n",
					target_pid_to_str (ptid));
		  return 1;
		}
	      else
		{
		  int new_pid;
		  int status;

		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LLAL: attach failed, and haven't seen "
					"this thread %s stop yet\n",
					target_pid_to_str (ptid));

		  /* We may or may not be attached to the LWP already.
		     Try waitpid on it.  If that errors, we're not
		     attached to the LWP yet.  Otherwise, we're
		     already attached.  */
		  gdb_assert (lwpid > 0);
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (new_pid == 0)
			{
			  /* The child hasn't stopped for its initial
			     SIGSTOP stop yet.  */
			  if (debug_linux_nat)
			    fprintf_unfiltered (gdb_stdlog,
						"LLAL: child hasn't "
						"stopped yet\n");
			}
		      else if (WIFSTOPPED (status))
			{
			  if (debug_linux_nat)
			    fprintf_unfiltered (gdb_stdlog,
						"LLAL: adding to stopped_pids\n");
			  add_to_pid_list (&stopped_pids, lwpid, status);
			}
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->last_resume_kind = resume_stop;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }

  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

  do_cleanups (restore_personality);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "Cannot attach to lwp %d: "
				      "thread is gone (%d: %s)\n",
				      lwpid, err, safe_strerror (err));
		}
	    }
	  else
	    {
	      warning (_("Cannot attach to lwp %d: %s"),
		       lwpid,
		       linux_ptrace_attach_fail_reason_string (ptid,
							       err));
	    }
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_ATTACH %s, 0, 0 (OK)\n",
				target_pid_to_str (ptid));

	  lp = add_lwp (ptid);
	  lp->cloned = 1;

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;
	}

      return 1;
    }
  return 0;
}

static void
linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (ops, 0, NULL);

  TRY
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_fail_reason (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      if (*buffer_s != '\0')
	throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
      else
	throw_error (ex.error, "%s", message);
    }
  END_CATCH

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid), status_to_str (status));

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

static void
linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      char *tem;

      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      tem = alloca (8);
      xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
      args = tem;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}

/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNAL is nonzero, give it that signal.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped, though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (lp->ptid);
}

/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's a
   zombie, or about to become one), discard the error, clear any
   pending status the LWP may have, and return true (we'll collect the
   exit status soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status = 0;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
      return 1;
    }
  return 0;
}

/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  TRY
    {
      linux_resume_one_lwp_throw (lp, step, signo);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw_exception (ex);
    }
  END_CATCH
}

/* Resume LP.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_ptid (lp->ptid);

      if (inf->vfork_child != NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (!lwp_status_pending_p (lp))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Resuming sibling %s, %s, %s\n",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"),
				step ? "step" : "resume");

	  linux_resume_one_lwp (lp, step, signo);
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}

/* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
   Resume LWP with the last stop signal, if it is in pass state.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, void *except)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp == except)
    return 0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = find_thread_ptid (lp->ptid);
      if (thread != NULL)
	{
	  signo = thread->suspend.stop_signal;
	  thread->suspend.stop_signal = GDB_SIGNAL_0;
	}
    }

  resume_lwp (lp, 0, signo);
  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

9f0bdab8
DJ
1744 if (debug_linux_nat)
1745 fprintf_unfiltered (gdb_stdlog,
1746 "LLR: Short circuiting for status 0x%x\n",
1747 lp->status);
d6b0e80f 1748
7feb7d06
PA
1749 if (target_can_async_p ())
1750 {
1751 target_async (inferior_event_handler, 0);
1752 /* Tell the event loop we have something to process. */
1753 async_file_mark ();
1754 }
9f0bdab8 1755 return;
d6b0e80f
AC
1756 }
1757
d90e17a7 1758 if (resume_many)
8817a6f2 1759 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7 1760
8a99810d 1761 linux_resume_one_lwp (lp, step, signo);
9f0bdab8 1762
d6b0e80f
AC
1763 if (debug_linux_nat)
1764 fprintf_unfiltered (gdb_stdlog,
1765 "LLR: %s %s, %s (resume event thread)\n",
1766 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1767 target_pid_to_str (ptid),
a493e3e2 1768 (signo != GDB_SIGNAL_0
2ea28649 1769 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2
PA
1770
1771 if (target_can_async_p ())
8ea051c5 1772 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1773}
1774
c5f62d5f 1775/* Send a signal to an LWP. */
d6b0e80f
AC
1776
1777static int
1778kill_lwp (int lwpid, int signo)
1779{
c5f62d5f
DE
1780 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1781 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
1782
1783#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
1784 {
1785 static int tkill_failed;
1786
1787 if (!tkill_failed)
1788 {
1789 int ret;
1790
1791 errno = 0;
1792 ret = syscall (__NR_tkill, lwpid, signo);
1793 if (errno != ENOSYS)
1794 return ret;
1795 tkill_failed = 1;
1796 }
1797 }
d6b0e80f
AC
1798#endif
1799
1800 return kill (lwpid, signo);
1801}
1802
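
   As an aside, a minimal sketch (not part of linux-nat.c) of a related
   alternative: newer kernels also provide the tgkill syscall, which takes
   the thread group id as well as the tid and so avoids signalling a
   recycled tid that now belongs to another process.  The helper name below
   is hypothetical and assumes __NR_tgkill is available.

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical variant of kill_lwp using tgkill when available.  */
static int
kill_lwp_tgkill (int tgid, int lwpid, int signo)
{
#ifdef __NR_tgkill
  return syscall (__NR_tgkill, tgid, lwpid, signo);
#else
  return kill (lwpid, signo);
#endif
}
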
ca2163eb
PA
1803/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1804 event, check if the core is interested in it: if not, ignore the
1805 event, and keep waiting; otherwise, we need to toggle the LWP's
1806 syscall entry/exit status, since the ptrace event itself doesn't
1807 indicate it, and report the trap to higher layers. */
1808
1809static int
1810linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1811{
1812 struct target_waitstatus *ourstatus = &lp->waitstatus;
1813 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1814 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1815
1816 if (stopping)
1817 {
1818 /* If we're stopping threads, there's a SIGSTOP pending, which
1819 makes it so that the LWP reports an immediate syscall return,
1820 followed by the SIGSTOP. Skip seeing that "return" using
1821 PTRACE_CONT directly, and let stop_wait_callback collect the
1822 SIGSTOP. Later when the thread is resumed, a new syscall
 1823 entry event is reported. If we didn't do this (and returned 0), we'd
1824 leave a syscall entry pending, and our caller, by using
1825 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1826 itself. Later, when the user re-resumes this LWP, we'd see
1827 another syscall entry event and we'd mistake it for a return.
1828
1829 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1830 (leaving immediately with LWP->signalled set, without issuing
1831 a PTRACE_CONT), it would still be problematic to leave this
1832 syscall enter pending, as later when the thread is resumed,
1833 it would then see the same syscall exit mentioned above,
1834 followed by the delayed SIGSTOP, while the syscall didn't
1835 actually get to execute. It seems it would be even more
1836 confusing to the user. */
1837
1838 if (debug_linux_nat)
1839 fprintf_unfiltered (gdb_stdlog,
1840 "LHST: ignoring syscall %d "
1841 "for LWP %ld (stopping threads), "
1842 "resuming with PTRACE_CONT for SIGSTOP\n",
1843 syscall_number,
dfd4cc63 1844 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1845
1846 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1847 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1848 lp->stopped = 0;
ca2163eb
PA
1849 return 1;
1850 }
1851
1852 if (catch_syscall_enabled ())
1853 {
1854 /* Always update the entry/return state, even if this particular
1855 syscall isn't interesting to the core now. In async mode,
1856 the user could install a new catchpoint for this syscall
1857 between syscall enter/return, and we'll need to know to
1858 report a syscall return if that happens. */
1859 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1860 ? TARGET_WAITKIND_SYSCALL_RETURN
1861 : TARGET_WAITKIND_SYSCALL_ENTRY);
1862
1863 if (catching_syscall_number (syscall_number))
1864 {
1865 /* Alright, an event to report. */
1866 ourstatus->kind = lp->syscall_state;
1867 ourstatus->value.syscall_number = syscall_number;
1868
1869 if (debug_linux_nat)
1870 fprintf_unfiltered (gdb_stdlog,
1871 "LHST: stopping for %s of syscall %d"
1872 " for LWP %ld\n",
3e43a32a
MS
1873 lp->syscall_state
1874 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1875 ? "entry" : "return",
1876 syscall_number,
dfd4cc63 1877 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1878 return 0;
1879 }
1880
1881 if (debug_linux_nat)
1882 fprintf_unfiltered (gdb_stdlog,
1883 "LHST: ignoring %s of syscall %d "
1884 "for LWP %ld\n",
1885 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1886 ? "entry" : "return",
1887 syscall_number,
dfd4cc63 1888 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1889 }
1890 else
1891 {
1892 /* If we had been syscall tracing, and hence used PT_SYSCALL
1893 before on this LWP, it could happen that the user removes all
1894 syscall catchpoints before we get to process this event.
1895 There are two noteworthy issues here:
1896
1897 - When stopped at a syscall entry event, resuming with
1898 PT_STEP still resumes executing the syscall and reports a
1899 syscall return.
1900
1901 - Only PT_SYSCALL catches syscall enters. If we last
1902 single-stepped this thread, then this event can't be a
 1903 syscall enter; in that case, this has to be a
 1904 syscall exit.
1905
1906 The points above mean that the next resume, be it PT_STEP or
1907 PT_CONTINUE, can not trigger a syscall trace event. */
1908 if (debug_linux_nat)
1909 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1910 "LHST: caught syscall event "
1911 "with no syscall catchpoints."
ca2163eb
PA
1912 " %d for LWP %ld, ignoring\n",
1913 syscall_number,
dfd4cc63 1914 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1915 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1916 }
1917
1918 /* The core isn't interested in this event. For efficiency, avoid
1919 stopping all threads only to have the core resume them all again.
1920 Since we're not stopping threads, if we're still syscall tracing
1921 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1922 subsequent syscall. Simply resume using the inf-ptrace layer,
1923 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1924
8a99810d 1925 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1926 return 1;
1927}
1928
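
   For reference, a minimal sketch (not part of linux-nat.c) of the status
   test this handling relies on: with PTRACE_O_TRACESYSGOOD set on the
   tracee, syscall stops are reported with the 0x80 bit set in the stop
   signal, which is what a definition like SYSCALL_SIGTRAP captures.  The
   helper name is made up for illustration.

#include <signal.h>
#include <sys/wait.h>

/* Hypothetical helper: is this wait status a TRACESYSGOOD syscall stop?  */
static int
is_syscall_stop (int status)
{
  /* Syscall stops carry SIGTRAP | 0x80, distinguishing them from
     ordinary SIGTRAP stops such as breakpoints.  */
  return WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80);
}
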
3d799a95
DJ
1929/* Handle a GNU/Linux extended wait response. If we see a clone
1930 event, we need to add the new LWP to our list (and not report the
1931 trap to higher layers). This function returns non-zero if the
1932 event should be ignored and we should wait again. If STOPPING is
1933 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1934
1935static int
3d799a95
DJ
1936linux_handle_extended_wait (struct lwp_info *lp, int status,
1937 int stopping)
d6b0e80f 1938{
dfd4cc63 1939 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1940 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1941 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1942
3d799a95
DJ
1943 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1944 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1945 {
3d799a95
DJ
1946 unsigned long new_pid;
1947 int ret;
1948
1949 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1950
3d799a95
DJ
1951 /* If we haven't already seen the new PID stop, wait for it now. */
1952 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1953 {
1954 /* The new child has a pending SIGSTOP. We can't affect it until it
1955 hits the SIGSTOP, but we're already attached. */
1956 ret = my_waitpid (new_pid, &status,
1957 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1958 if (ret == -1)
1959 perror_with_name (_("waiting for new child"));
1960 else if (ret != new_pid)
1961 internal_error (__FILE__, __LINE__,
1962 _("wait returned unexpected PID %d"), ret);
1963 else if (!WIFSTOPPED (status))
1964 internal_error (__FILE__, __LINE__,
1965 _("wait returned unexpected status 0x%x"), status);
1966 }
1967
3a3e9ee3 1968 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1969
26cb8b7c
PA
1970 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1971 {
1972 /* The arch-specific native code may need to know about new
1973 forks even if those end up never mapped to an
1974 inferior. */
1975 if (linux_nat_new_fork != NULL)
1976 linux_nat_new_fork (lp, new_pid);
1977 }
1978
2277426b 1979 if (event == PTRACE_EVENT_FORK
dfd4cc63 1980 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 1981 {
2277426b
PA
1982 /* Handle checkpointing by linux-fork.c here as a special
1983 case. We don't want the follow-fork-mode or 'catch fork'
1984 to interfere with this. */
1985
1986 /* This won't actually modify the breakpoint list, but will
1987 physically remove the breakpoints from the child. */
d80ee84f 1988 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
1989
1990 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1991 if (!find_fork_pid (new_pid))
1992 add_fork (new_pid);
2277426b
PA
1993
1994 /* Report as spurious, so that infrun doesn't want to follow
1995 this fork. We're actually doing an infcall in
1996 linux-fork.c. */
1997 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
1998
1999 /* Report the stop to the core. */
2000 return 0;
2001 }
2002
3d799a95
DJ
2003 if (event == PTRACE_EVENT_FORK)
2004 ourstatus->kind = TARGET_WAITKIND_FORKED;
2005 else if (event == PTRACE_EVENT_VFORK)
2006 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2007 else
3d799a95 2008 {
78768c4a
JK
2009 struct lwp_info *new_lp;
2010
3d799a95 2011 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2012
3c4d7e12
PA
2013 if (debug_linux_nat)
2014 fprintf_unfiltered (gdb_stdlog,
2015 "LHEW: Got clone event "
2016 "from LWP %d, new child is LWP %ld\n",
2017 pid, new_pid);
2018
dfd4cc63 2019 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
3d799a95 2020 new_lp->cloned = 1;
4c28f408 2021 new_lp->stopped = 1;
d6b0e80f 2022
3d799a95
DJ
2023 if (WSTOPSIG (status) != SIGSTOP)
2024 {
 2025 /* This can happen if someone starts sending the new thread
 2026 signals with a lower number than SIGSTOP (e.g. SIGUSR1)
 2027 before it gets a chance to run.
2028 This is an unlikely case, and harder to handle for
2029 fork / vfork than for clone, so we do not try - but
2030 we handle it for clone events here. We'll send
2031 the other signal on to the thread below. */
2032
2033 new_lp->signalled = 1;
2034 }
2035 else
79395f92
PA
2036 {
2037 struct thread_info *tp;
2038
2039 /* When we stop for an event in some other thread, and
2040 pull the thread list just as this thread has cloned,
2041 we'll have seen the new thread in the thread_db list
2042 before handling the CLONE event (glibc's
2043 pthread_create adds the new thread to the thread list
2044 before clone'ing, and has the kernel fill in the
2045 thread's tid on the clone call with
2046 CLONE_PARENT_SETTID). If that happened, and the core
2047 had requested the new thread to stop, we'll have
2048 killed it with SIGSTOP. But since SIGSTOP is not an
2049 RT signal, it can only be queued once. We need to be
2050 careful to not resume the LWP if we wanted it to
2051 stop. In that case, we'll leave the SIGSTOP pending.
a493e3e2 2052 It will later be reported as GDB_SIGNAL_0. */
79395f92
PA
2053 tp = find_thread_ptid (new_lp->ptid);
2054 if (tp != NULL && tp->stop_requested)
2055 new_lp->last_resume_kind = resume_stop;
2056 else
2057 status = 0;
2058 }
d6b0e80f 2059
2db9a427
PA
2060 /* If the thread_db layer is active, let it record the user
2061 level thread id and status, and add the thread to GDB's
2062 list. */
2063 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 2064 {
2db9a427
PA
2065 /* The process is not using thread_db. Add the LWP to
2066 GDB's list. */
2067 target_post_attach (ptid_get_lwp (new_lp->ptid));
2068 add_thread (new_lp->ptid);
2069 }
4c28f408 2070
2db9a427
PA
2071 if (!stopping)
2072 {
2073 set_running (new_lp->ptid, 1);
2074 set_executing (new_lp->ptid, 1);
2075 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2076 resume_stop. */
2077 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2078 }
2079
79395f92
PA
2080 if (status != 0)
2081 {
2082 /* We created NEW_LP so it cannot yet contain STATUS. */
2083 gdb_assert (new_lp->status == 0);
2084
2085 /* Save the wait status to report later. */
2086 if (debug_linux_nat)
2087 fprintf_unfiltered (gdb_stdlog,
2088 "LHEW: waitpid of new LWP %ld, "
2089 "saving status %s\n",
dfd4cc63 2090 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
2091 status_to_str (status));
2092 new_lp->status = status;
2093 }
2094
20ba1ce6 2095 new_lp->resumed = !stopping;
3d799a95
DJ
2096 return 1;
2097 }
2098
2099 return 0;
d6b0e80f
AC
2100 }
2101
3d799a95
DJ
2102 if (event == PTRACE_EVENT_EXEC)
2103 {
a75724bc
PA
2104 if (debug_linux_nat)
2105 fprintf_unfiltered (gdb_stdlog,
2106 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 2107 ptid_get_lwp (lp->ptid));
a75724bc 2108
3d799a95
DJ
2109 ourstatus->kind = TARGET_WAITKIND_EXECD;
2110 ourstatus->value.execd_pathname
8dd27370 2111 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 2112
8af756ef
PA
2113 /* The thread that execed must have been resumed, but, when a
2114 thread execs, it changes its tid to the tgid, and the old
2115 tgid thread might have not been resumed. */
2116 lp->resumed = 1;
6c95b8df
PA
2117 return 0;
2118 }
2119
2120 if (event == PTRACE_EVENT_VFORK_DONE)
2121 {
2122 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2123 {
6c95b8df 2124 if (debug_linux_nat)
3e43a32a
MS
2125 fprintf_unfiltered (gdb_stdlog,
2126 "LHEW: Got expected PTRACE_EVENT_"
2127 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 2128 ptid_get_lwp (lp->ptid));
3d799a95 2129
6c95b8df
PA
2130 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2131 return 0;
3d799a95
DJ
2132 }
2133
6c95b8df 2134 if (debug_linux_nat)
3e43a32a
MS
2135 fprintf_unfiltered (gdb_stdlog,
2136 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 2137 "from LWP %ld: ignoring\n",
dfd4cc63 2138 ptid_get_lwp (lp->ptid));
6c95b8df 2139 return 1;
3d799a95
DJ
2140 }
2141
2142 internal_error (__FILE__, __LINE__,
2143 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2144}
2145
2146/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2147 exited. */
2148
2149static int
2150wait_lwp (struct lwp_info *lp)
2151{
2152 pid_t pid;
432b4d03 2153 int status = 0;
d6b0e80f 2154 int thread_dead = 0;
432b4d03 2155 sigset_t prev_mask;
d6b0e80f
AC
2156
2157 gdb_assert (!lp->stopped);
2158 gdb_assert (lp->status == 0);
2159
432b4d03
JK
2160 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2161 block_child_signals (&prev_mask);
2162
2163 for (;;)
d6b0e80f 2164 {
432b4d03
JK
2165 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2166 was right and we should just call sigsuspend. */
2167
dfd4cc63 2168 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
d6b0e80f 2169 if (pid == -1 && errno == ECHILD)
dfd4cc63 2170 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2171 if (pid == -1 && errno == ECHILD)
2172 {
2173 /* The thread has previously exited. We need to delete it
2174 now because, for some vendor 2.4 kernels with NPTL
2175 support backported, there won't be an exit event unless
2176 it is the main thread. 2.6 kernels will report an exit
2177 event for each thread that exits, as expected. */
2178 thread_dead = 1;
2179 if (debug_linux_nat)
2180 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2181 target_pid_to_str (lp->ptid));
2182 }
432b4d03
JK
2183 if (pid != 0)
2184 break;
2185
2186 /* Bugs 10970, 12702.
2187 Thread group leader may have exited in which case we'll lock up in
2188 waitpid if there are other threads, even if they are all zombies too.
2189 Basically, we're not supposed to use waitpid this way.
2190 __WCLONE is not applicable for the leader so we can't use that.
2191 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2192 process; it gets ESRCH both for the zombie and for running processes.
2193
2194 As a workaround, check if we're waiting for the thread group leader and
2195 if it's a zombie, and avoid calling waitpid if it is.
2196
2197 This is racy, what if the tgl becomes a zombie right after we check?
2198 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2199 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2200
dfd4cc63
LM
2201 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2202 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2203 {
d6b0e80f
AC
2204 thread_dead = 1;
2205 if (debug_linux_nat)
432b4d03
JK
2206 fprintf_unfiltered (gdb_stdlog,
2207 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2208 target_pid_to_str (lp->ptid));
432b4d03 2209 break;
d6b0e80f 2210 }
432b4d03
JK
2211
 2212 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
 2213 handlers get invoked even though our caller intentionally blocked
 2214 them with block_child_signals. Only the loop in linux_nat_wait_1 is
 2215 sensitive to this, and there, if we get called, my_waitpid gets
 2216 called again before it gets to sigsuspend, so we can safely let the
 2217 handlers get executed here. */
2218
d36bf488
DE
2219 if (debug_linux_nat)
2220 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2221 sigsuspend (&suspend_mask);
2222 }
2223
2224 restore_child_signals_mask (&prev_mask);
2225
d6b0e80f
AC
2226 if (!thread_dead)
2227 {
dfd4cc63 2228 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2229
2230 if (debug_linux_nat)
2231 {
2232 fprintf_unfiltered (gdb_stdlog,
2233 "WL: waitpid %s received %s\n",
2234 target_pid_to_str (lp->ptid),
2235 status_to_str (status));
2236 }
d6b0e80f 2237
a9f4bb21
PA
2238 /* Check if the thread has exited. */
2239 if (WIFEXITED (status) || WIFSIGNALED (status))
2240 {
2241 thread_dead = 1;
2242 if (debug_linux_nat)
2243 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2244 target_pid_to_str (lp->ptid));
2245 }
d6b0e80f
AC
2246 }
2247
2248 if (thread_dead)
2249 {
e26af52f 2250 exit_lwp (lp);
d6b0e80f
AC
2251 return 0;
2252 }
2253
2254 gdb_assert (WIFSTOPPED (status));
8817a6f2 2255 lp->stopped = 1;
d6b0e80f 2256
8784d563
PA
2257 if (lp->must_set_ptrace_flags)
2258 {
2259 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2260
2261 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2262 lp->must_set_ptrace_flags = 0;
2263 }
2264
ca2163eb
PA
2265 /* Handle GNU/Linux's syscall SIGTRAPs. */
2266 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2267 {
2268 /* No longer need the sysgood bit. The ptrace event ends up
2269 recorded in lp->waitstatus if we care for it. We can carry
2270 on handling the event like a regular SIGTRAP from here
2271 on. */
2272 status = W_STOPCODE (SIGTRAP);
2273 if (linux_handle_syscall_trap (lp, 1))
2274 return wait_lwp (lp);
2275 }
2276
d6b0e80f 2277 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2278 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2279 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2280 {
2281 if (debug_linux_nat)
2282 fprintf_unfiltered (gdb_stdlog,
2283 "WL: Handling extended status 0x%06x\n",
2284 status);
20ba1ce6
PA
2285 linux_handle_extended_wait (lp, status, 1);
2286 return 0;
d6b0e80f
AC
2287 }
2288
2289 return status;
2290}
2291
2292/* Send a SIGSTOP to LP. */
2293
2294static int
2295stop_callback (struct lwp_info *lp, void *data)
2296{
2297 if (!lp->stopped && !lp->signalled)
2298 {
2299 int ret;
2300
2301 if (debug_linux_nat)
2302 {
2303 fprintf_unfiltered (gdb_stdlog,
2304 "SC: kill %s **<SIGSTOP>**\n",
2305 target_pid_to_str (lp->ptid));
2306 }
2307 errno = 0;
dfd4cc63 2308 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2309 if (debug_linux_nat)
2310 {
2311 fprintf_unfiltered (gdb_stdlog,
2312 "SC: lwp kill %d %s\n",
2313 ret,
2314 errno ? safe_strerror (errno) : "ERRNO-OK");
2315 }
2316
2317 lp->signalled = 1;
2318 gdb_assert (lp->status == 0);
2319 }
2320
2321 return 0;
2322}
2323
7b50312a
PA
2324/* Request a stop on LWP. */
2325
2326void
2327linux_stop_lwp (struct lwp_info *lwp)
2328{
2329 stop_callback (lwp, NULL);
2330}
2331
2db9a427
PA
2332/* See linux-nat.h */
2333
2334void
2335linux_stop_and_wait_all_lwps (void)
2336{
2337 /* Stop all LWP's ... */
2338 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2339
2340 /* ... and wait until all of them have reported back that
2341 they're no longer running. */
2342 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2343}
2344
2345/* See linux-nat.h */
2346
2347void
2348linux_unstop_all_lwps (void)
2349{
2350 iterate_over_lwps (minus_one_ptid,
2351 resume_stopped_resumed_lwps, &minus_one_ptid);
2352}
2353
57380f4e 2354/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2355
2356static int
57380f4e
DJ
2357linux_nat_has_pending_sigint (int pid)
2358{
2359 sigset_t pending, blocked, ignored;
57380f4e
DJ
2360
2361 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2362
2363 if (sigismember (&pending, SIGINT)
2364 && !sigismember (&ignored, SIGINT))
2365 return 1;
2366
2367 return 0;
2368}
2369
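
   A hedged sketch (not the actual linux_proc_pending_signals) of what such
   a query boils down to: /proc/PID/status carries hex signal masks such as
   SigPnd, ShdPnd, SigBlk and SigIgn, where bit N-1 corresponds to signal
   number N.  The helper name and the minimal error handling below are
   illustrative assumptions; real code also consults the shared-pending and
   ignored masks, as the function above does.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: non-zero if SIGNO is in the "SigPnd:" mask of
   /proc/PID/status.  */
static int
proc_signal_pending (long pid, int signo)
{
  char path[64], line[256];
  FILE *f;
  unsigned long long mask = 0;

  snprintf (path, sizeof (path), "/proc/%ld/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0)
      {
	mask = strtoull (line + 7, NULL, 16);
	break;
      }
  fclose (f);
  return (mask & (1ULL << (signo - 1))) != 0;
}
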
2370/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2371
2372static int
2373set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2374{
57380f4e
DJ
2375 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2376 flag to consume the next one. */
2377 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2378 && WSTOPSIG (lp->status) == SIGINT)
2379 lp->status = 0;
2380 else
2381 lp->ignore_sigint = 1;
2382
2383 return 0;
2384}
2385
2386/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2387 This function is called after we know the LWP has stopped; if the LWP
2388 stopped before the expected SIGINT was delivered, then it will never have
2389 arrived. Also, if the signal was delivered to a shared queue and consumed
2390 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2391
57380f4e
DJ
2392static void
2393maybe_clear_ignore_sigint (struct lwp_info *lp)
2394{
2395 if (!lp->ignore_sigint)
2396 return;
2397
dfd4cc63 2398 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2399 {
2400 if (debug_linux_nat)
2401 fprintf_unfiltered (gdb_stdlog,
2402 "MCIS: Clearing bogus flag for %s\n",
2403 target_pid_to_str (lp->ptid));
2404 lp->ignore_sigint = 0;
2405 }
2406}
2407
ebec9a0f
PA
2408/* Fetch the possible triggered data watchpoint info and store it in
2409 LP.
2410
2411 On some archs, like x86, that use debug registers to set
2412 watchpoints, it's possible that the way to know which watched
 2413 address trapped is to check the register that is used to select
2414 which address to watch. Problem is, between setting the watchpoint
2415 and reading back which data address trapped, the user may change
2416 the set of watchpoints, and, as a consequence, GDB changes the
2417 debug registers in the inferior. To avoid reading back a stale
2418 stopped-data-address when that happens, we cache in LP the fact
2419 that a watchpoint trapped, and the corresponding data address, as
2420 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2421 registers meanwhile, we have the cached data we can rely on. */
2422
9c02b525
PA
2423static int
2424check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f
PA
2425{
2426 struct cleanup *old_chain;
2427
2428 if (linux_ops->to_stopped_by_watchpoint == NULL)
9c02b525 2429 return 0;
ebec9a0f
PA
2430
2431 old_chain = save_inferior_ptid ();
2432 inferior_ptid = lp->ptid;
2433
9c02b525 2434 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
ebec9a0f 2435 {
15c66dd6 2436 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
9c02b525 2437
ebec9a0f
PA
2438 if (linux_ops->to_stopped_data_address != NULL)
2439 lp->stopped_data_address_p =
2440 linux_ops->to_stopped_data_address (&current_target,
2441 &lp->stopped_data_address);
2442 else
2443 lp->stopped_data_address_p = 0;
2444 }
2445
2446 do_cleanups (old_chain);
9c02b525 2447
15c66dd6 2448 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2449}
2450
2451/* Called when the LWP stopped for a trap that could be explained by a
2452 watchpoint or a breakpoint. */
2453
2454static void
2455save_sigtrap (struct lwp_info *lp)
2456{
15c66dd6 2457 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
9c02b525
PA
2458 gdb_assert (lp->status != 0);
2459
faf09f01
PA
2460 /* Check first if this was a SW/HW breakpoint before checking
2461 watchpoints, because at least s390 can't tell the data address of
2462 hardware watchpoint hits, and the kernel returns
2463 stopped-by-watchpoint as long as there's a watchpoint set. */
9c02b525
PA
2464 if (linux_nat_status_is_event (lp->status))
2465 check_stopped_by_breakpoint (lp);
faf09f01
PA
2466
2467 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2468 or hardware watchpoint. Check which is which if we got
2469 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2470 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON
2471 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2472 check_stopped_by_watchpoint (lp);
ebec9a0f
PA
2473}
2474
9c02b525 2475/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f
PA
2476
2477static int
6a109b6b 2478linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2479{
2480 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2481
2482 gdb_assert (lp != NULL);
2483
15c66dd6 2484 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2485}
2486
2487static int
2488linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2489{
2490 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2491
2492 gdb_assert (lp != NULL);
2493
2494 *addr_p = lp->stopped_data_address;
2495
2496 return lp->stopped_data_address_p;
2497}
2498
26ab7092
JK
 2499/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2500
2501static int
2502sigtrap_is_event (int status)
2503{
2504 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2505}
2506
26ab7092
JK
 2507/* Set an alternative recognizer for SIGTRAP-like events. If
 2508 breakpoint_inserted_here_p finds a breakpoint there, then
 2509 gdbarch_decr_pc_after_break will be applied. */
2510
2511void
2512linux_nat_set_status_is_event (struct target_ops *t,
2513 int (*status_is_event) (int status))
2514{
2515 linux_nat_status_is_event = status_is_event;
2516}
2517
57380f4e
DJ
2518/* Wait until LP is stopped. */
2519
2520static int
2521stop_wait_callback (struct lwp_info *lp, void *data)
2522{
c9657e70 2523 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2524
2525 /* If this is a vfork parent, bail out, it is not going to report
2526 any SIGSTOP until the vfork is done with. */
2527 if (inf->vfork_child != NULL)
2528 return 0;
2529
d6b0e80f
AC
2530 if (!lp->stopped)
2531 {
2532 int status;
2533
2534 status = wait_lwp (lp);
2535 if (status == 0)
2536 return 0;
2537
57380f4e
DJ
2538 if (lp->ignore_sigint && WIFSTOPPED (status)
2539 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2540 {
57380f4e 2541 lp->ignore_sigint = 0;
d6b0e80f
AC
2542
2543 errno = 0;
dfd4cc63 2544 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2545 lp->stopped = 0;
d6b0e80f
AC
2546 if (debug_linux_nat)
2547 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2548 "PTRACE_CONT %s, 0, 0 (%s) "
2549 "(discarding SIGINT)\n",
d6b0e80f
AC
2550 target_pid_to_str (lp->ptid),
2551 errno ? safe_strerror (errno) : "OK");
2552
57380f4e 2553 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2554 }
2555
57380f4e
DJ
2556 maybe_clear_ignore_sigint (lp);
2557
d6b0e80f
AC
2558 if (WSTOPSIG (status) != SIGSTOP)
2559 {
e5ef252a 2560 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2561
e5ef252a
PA
2562 if (debug_linux_nat)
2563 fprintf_unfiltered (gdb_stdlog,
2564 "SWC: Pending event %s in %s\n",
2565 status_to_str ((int) status),
2566 target_pid_to_str (lp->ptid));
2567
2568 /* Save the sigtrap event. */
2569 lp->status = status;
e5ef252a 2570 gdb_assert (lp->signalled);
9c02b525 2571 save_sigtrap (lp);
d6b0e80f
AC
2572 }
2573 else
2574 {
2575 /* We caught the SIGSTOP that we intended to catch, so
2576 there's no SIGSTOP pending. */
e5ef252a
PA
2577
2578 if (debug_linux_nat)
2579 fprintf_unfiltered (gdb_stdlog,
2580 "SWC: Delayed SIGSTOP caught for %s.\n",
2581 target_pid_to_str (lp->ptid));
2582
e5ef252a
PA
2583 /* Reset SIGNALLED only after the stop_wait_callback call
2584 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2585 lp->signalled = 0;
2586 }
2587 }
2588
2589 return 0;
2590}
2591
9c02b525
PA
2592/* Return non-zero if LP has a wait status pending. Discard the
2593 pending event and resume the LWP if the event that originally
2594 caused the stop became uninteresting. */
d6b0e80f
AC
2595
2596static int
2597status_callback (struct lwp_info *lp, void *data)
2598{
2599 /* Only report a pending wait status if we pretend that this has
2600 indeed been resumed. */
ca2163eb
PA
2601 if (!lp->resumed)
2602 return 0;
2603
eb54c8bf
PA
2604 if (!lwp_status_pending_p (lp))
2605 return 0;
2606
15c66dd6
PA
2607 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2608 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2609 {
2610 struct regcache *regcache = get_thread_regcache (lp->ptid);
2611 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2612 CORE_ADDR pc;
2613 int discard = 0;
2614
9c02b525
PA
2615 pc = regcache_read_pc (regcache);
2616
2617 if (pc != lp->stop_pc)
2618 {
2619 if (debug_linux_nat)
2620 fprintf_unfiltered (gdb_stdlog,
2621 "SC: PC of %s changed. was=%s, now=%s\n",
2622 target_pid_to_str (lp->ptid),
2623 paddress (target_gdbarch (), lp->stop_pc),
2624 paddress (target_gdbarch (), pc));
2625 discard = 1;
2626 }
faf09f01
PA
2627
2628#if !USE_SIGTRAP_SIGINFO
9c02b525
PA
2629 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2630 {
2631 if (debug_linux_nat)
2632 fprintf_unfiltered (gdb_stdlog,
2633 "SC: previous breakpoint of %s, at %s gone\n",
2634 target_pid_to_str (lp->ptid),
2635 paddress (target_gdbarch (), lp->stop_pc));
2636
2637 discard = 1;
2638 }
faf09f01 2639#endif
9c02b525
PA
2640
2641 if (discard)
2642 {
2643 if (debug_linux_nat)
2644 fprintf_unfiltered (gdb_stdlog,
2645 "SC: pending event of %s cancelled.\n",
2646 target_pid_to_str (lp->ptid));
2647
2648 lp->status = 0;
2649 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2650 return 0;
2651 }
9c02b525
PA
2652 }
2653
eb54c8bf 2654 return 1;
d6b0e80f
AC
2655}
2656
2657/* Return non-zero if LP isn't stopped. */
2658
2659static int
2660running_callback (struct lwp_info *lp, void *data)
2661{
25289eb2 2662 return (!lp->stopped
8a99810d 2663 || (lwp_status_pending_p (lp) && lp->resumed));
d6b0e80f
AC
2664}
2665
2666/* Count the LWP's that have had events. */
2667
2668static int
2669count_events_callback (struct lwp_info *lp, void *data)
2670{
2671 int *count = data;
2672
2673 gdb_assert (count != NULL);
2674
9c02b525
PA
2675 /* Select only resumed LWPs that have an event pending. */
2676 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2677 (*count)++;
2678
2679 return 0;
2680}
2681
2682/* Select the LWP (if any) that is currently being single-stepped. */
2683
2684static int
2685select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2686{
25289eb2
PA
2687 if (lp->last_resume_kind == resume_step
2688 && lp->status != 0)
d6b0e80f
AC
2689 return 1;
2690 else
2691 return 0;
2692}
2693
8a99810d
PA
2694/* Returns true if LP has a status pending. */
2695
2696static int
2697lwp_status_pending_p (struct lwp_info *lp)
2698{
 2699 /* We check lp->waitstatus in addition to lp->status, because
 2700 pending process exits are recorded in lp->waitstatus rather than
 2701 in lp->status, since W_EXITCODE(0,0) happens to be 0. */
2702 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2703}
2704
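
   A small sketch (not part of the file) of the encoding quirk the comment
   above refers to: a normal exit with code 0 encodes to the integer 0, so
   it cannot live in lp->status.  The fallback macro definitions below are
   assumptions matching the usual glibc definitions (gdb_wait.h provides
   similar fallbacks).

#include <signal.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
#define W_EXITCODE(ret, sig) (((ret) << 8) | (sig))
#endif
#ifndef W_STOPCODE
#define W_STOPCODE(sig) (((sig) << 8) | 0x7f)
#endif

static void
pending_status_encoding_example (void)
{
  int exited_ok = W_EXITCODE (0, 0);	/* "exited normally, code 0" */
  int stopped = W_STOPCODE (SIGTRAP);	/* "stopped by SIGTRAP" */

  /* exited_ok == 0: indistinguishable from "no pending status", hence the
     separate lp->waitstatus check.  stopped == 0x57f: non-zero, so stop
     events can be kept in lp->status directly.  */
  (void) exited_ok;
  (void) stopped;
}
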
b90fc188 2705/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2706
2707static int
2708select_event_lwp_callback (struct lwp_info *lp, void *data)
2709{
2710 int *selector = data;
2711
2712 gdb_assert (selector != NULL);
2713
9c02b525
PA
2714 /* Select only resumed LWPs that have an event pending. */
2715 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2716 if ((*selector)-- == 0)
2717 return 1;
2718
2719 return 0;
2720}
2721
9c02b525
PA
2722/* Called when the LWP got a signal/trap that could be explained by a
2723 software or hardware breakpoint. */
2724
710151dd 2725static int
9c02b525 2726check_stopped_by_breakpoint (struct lwp_info *lp)
710151dd
PA
2727{
2728 /* Arrange for a breakpoint to be hit again later. We don't keep
2729 the SIGTRAP status and don't forward the SIGTRAP signal to the
2730 LWP. We will handle the current event, eventually we will resume
2731 this LWP, and this breakpoint will trap again.
2732
2733 If we do not do this, then we run the risk that the user will
2734 delete or disable the breakpoint, but the LWP will have already
2735 tripped on it. */
2736
515630c5
UW
2737 struct regcache *regcache = get_thread_regcache (lp->ptid);
2738 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2739 CORE_ADDR pc;
9c02b525 2740 CORE_ADDR sw_bp_pc;
faf09f01
PA
2741#if USE_SIGTRAP_SIGINFO
2742 siginfo_t siginfo;
2743#endif
9c02b525
PA
2744
2745 pc = regcache_read_pc (regcache);
527a273a 2746 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2747
faf09f01
PA
2748#if USE_SIGTRAP_SIGINFO
2749 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2750 {
2751 if (siginfo.si_signo == SIGTRAP)
2752 {
2753 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
2754 {
2755 if (debug_linux_nat)
2756 fprintf_unfiltered (gdb_stdlog,
2757 "CSBB: Push back software "
2758 "breakpoint for %s\n",
2759 target_pid_to_str (lp->ptid));
2760
2761 /* Back up the PC if necessary. */
2762 if (pc != sw_bp_pc)
2763 regcache_write_pc (regcache, sw_bp_pc);
2764
2765 lp->stop_pc = sw_bp_pc;
2766 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2767 return 1;
2768 }
2769 else if (siginfo.si_code == TRAP_HWBKPT)
2770 {
2771 if (debug_linux_nat)
2772 fprintf_unfiltered (gdb_stdlog,
2773 "CSBB: Push back hardware "
2774 "breakpoint/watchpoint for %s\n",
2775 target_pid_to_str (lp->ptid));
2776
2777 lp->stop_pc = pc;
2778 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2779 return 1;
2780 }
2781 }
2782 }
2783#else
9c02b525
PA
2784 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2785 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2786 sw_bp_pc))
710151dd 2787 {
9c02b525
PA
2788 /* The LWP was either continued, or stepped a software
2789 breakpoint instruction. */
710151dd
PA
2790 if (debug_linux_nat)
2791 fprintf_unfiltered (gdb_stdlog,
9c02b525 2792 "CB: Push back software breakpoint for %s\n",
710151dd
PA
2793 target_pid_to_str (lp->ptid));
2794
2795 /* Back up the PC if necessary. */
9c02b525
PA
2796 if (pc != sw_bp_pc)
2797 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2798
9c02b525 2799 lp->stop_pc = sw_bp_pc;
15c66dd6 2800 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
710151dd
PA
2801 return 1;
2802 }
710151dd 2803
9c02b525
PA
2804 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2805 {
2806 if (debug_linux_nat)
2807 fprintf_unfiltered (gdb_stdlog,
2808 "CB: Push back hardware breakpoint for %s\n",
2809 target_pid_to_str (lp->ptid));
d6b0e80f 2810
9c02b525 2811 lp->stop_pc = pc;
15c66dd6 2812 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
9c02b525
PA
2813 return 1;
2814 }
faf09f01 2815#endif
d6b0e80f
AC
2816
2817 return 0;
2818}
2819
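
   For reference, a minimal sketch (not from this file) of how the siginfo
   consulted above can be fetched; linux_nat_get_siginfo wraps essentially
   this ptrace request.  The helper name is hypothetical and the error
   handling deliberately thin.

#include <errno.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Hypothetical helper: fetch the pending siginfo of a stopped LWP.
   si_signo == SIGTRAP with si_code TRAP_BRKPT indicates a software
   breakpoint; TRAP_HWBKPT indicates a hardware breakpoint or
   watchpoint.  */
static int
fetch_siginfo (pid_t lwpid, siginfo_t *si)
{
  errno = 0;
  if (ptrace (PTRACE_GETSIGINFO, lwpid, (void *) 0, si) != 0)
    return 0;			/* e.g. ESRCH if the LWP is gone.  */
  return 1;
}
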
faf09f01
PA
2820
2821/* Returns true if the LWP had stopped for a software breakpoint. */
2822
2823static int
2824linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
2825{
2826 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2827
2828 gdb_assert (lp != NULL);
2829
2830 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2831}
2832
2833/* Implement the supports_stopped_by_sw_breakpoint method. */
2834
2835static int
2836linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2837{
2838 return USE_SIGTRAP_SIGINFO;
2839}
2840
2841/* Returns true if the LWP had stopped for a hardware
2842 breakpoint/watchpoint. */
2843
2844static int
2845linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
2846{
2847 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2848
2849 gdb_assert (lp != NULL);
2850
2851 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2852}
2853
2854/* Implement the supports_stopped_by_hw_breakpoint method. */
2855
2856static int
2857linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2858{
2859 return USE_SIGTRAP_SIGINFO;
2860}
2861
d6b0e80f
AC
2862/* Select one LWP out of those that have events pending. */
2863
2864static void
d90e17a7 2865select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2866{
2867 int num_events = 0;
2868 int random_selector;
9c02b525 2869 struct lwp_info *event_lp = NULL;
d6b0e80f 2870
ac264b3b 2871 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2872 (*orig_lp)->status = *status;
2873
9c02b525
PA
2874 /* In all-stop, give preference to the LWP that is being
2875 single-stepped. There will be at most one, and it will be the
2876 LWP that the core is most interested in. If we didn't do this,
2877 then we'd have to handle pending step SIGTRAPs somehow in case
2878 the core later continues the previously-stepped thread, as
2879 otherwise we'd report the pending SIGTRAP then, and the core, not
2880 having stepped the thread, wouldn't understand what the trap was
2881 for, and therefore would report it to the user as a random
2882 signal. */
2883 if (!non_stop)
d6b0e80f 2884 {
9c02b525
PA
2885 event_lp = iterate_over_lwps (filter,
2886 select_singlestep_lwp_callback, NULL);
2887 if (event_lp != NULL)
2888 {
2889 if (debug_linux_nat)
2890 fprintf_unfiltered (gdb_stdlog,
2891 "SEL: Select single-step %s\n",
2892 target_pid_to_str (event_lp->ptid));
2893 }
d6b0e80f 2894 }
9c02b525
PA
2895
2896 if (event_lp == NULL)
d6b0e80f 2897 {
9c02b525 2898 /* Pick one at random, out of those which have had events. */
d6b0e80f 2899
9c02b525 2900 /* First see how many events we have. */
d90e17a7 2901 iterate_over_lwps (filter, count_events_callback, &num_events);
8bf3b159 2902 gdb_assert (num_events > 0);
d6b0e80f 2903
9c02b525
PA
2904 /* Now randomly pick a LWP out of those that have had
2905 events. */
d6b0e80f
AC
2906 random_selector = (int)
2907 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2908
2909 if (debug_linux_nat && num_events > 1)
2910 fprintf_unfiltered (gdb_stdlog,
9c02b525 2911 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2912 num_events, random_selector);
2913
d90e17a7
PA
2914 event_lp = iterate_over_lwps (filter,
2915 select_event_lwp_callback,
d6b0e80f
AC
2916 &random_selector);
2917 }
2918
2919 if (event_lp != NULL)
2920 {
2921 /* Switch the event LWP. */
2922 *orig_lp = event_lp;
2923 *status = event_lp->status;
2924 }
2925
2926 /* Flush the wait status for the event LWP. */
2927 (*orig_lp)->status = 0;
2928}
2929
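
   A tiny aside (not part of linux-nat.c) on the scaling used above to pick
   a random event: multiplying by rand()/(RAND_MAX+1.0) maps the generator's
   range onto [0, num_events) without the modulo bias of rand() % n.  The
   helper name is illustrative.

#include <stdlib.h>

/* Hypothetical helper: pick an index uniformly in [0, num_events).  */
static int
pick_uniform_index (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
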
2930/* Return non-zero if LP has been resumed. */
2931
2932static int
2933resumed_callback (struct lwp_info *lp, void *data)
2934{
2935 return lp->resumed;
2936}
2937
12d9289a
PA
2938/* Stop an active thread, verify it still exists, then resume it. If
2939 the thread ends up with a pending status, then it is not resumed,
 2940 and *DATA (really a pointer to int) is set. */
d6b0e80f
AC
2941
2942static int
2943stop_and_resume_callback (struct lwp_info *lp, void *data)
2944{
25289eb2 2945 if (!lp->stopped)
d6b0e80f 2946 {
25289eb2
PA
2947 ptid_t ptid = lp->ptid;
2948
d6b0e80f
AC
2949 stop_callback (lp, NULL);
2950 stop_wait_callback (lp, NULL);
25289eb2
PA
2951
2952 /* Resume if the lwp still exists, and the core wanted it
2953 running. */
12d9289a
PA
2954 lp = find_lwp_pid (ptid);
2955 if (lp != NULL)
25289eb2 2956 {
12d9289a 2957 if (lp->last_resume_kind == resume_stop
8a99810d 2958 && !lwp_status_pending_p (lp))
12d9289a
PA
2959 {
2960 /* The core wanted the LWP to stop. Even if it stopped
2961 cleanly (with SIGSTOP), leave the event pending. */
2962 if (debug_linux_nat)
2963 fprintf_unfiltered (gdb_stdlog,
2964 "SARC: core wanted LWP %ld stopped "
2965 "(leaving SIGSTOP pending)\n",
dfd4cc63 2966 ptid_get_lwp (lp->ptid));
12d9289a
PA
2967 lp->status = W_STOPCODE (SIGSTOP);
2968 }
2969
8a99810d 2970 if (!lwp_status_pending_p (lp))
12d9289a
PA
2971 {
2972 if (debug_linux_nat)
2973 fprintf_unfiltered (gdb_stdlog,
2974 "SARC: re-resuming LWP %ld\n",
dfd4cc63 2975 ptid_get_lwp (lp->ptid));
e5ef252a 2976 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
2977 }
2978 else
2979 {
2980 if (debug_linux_nat)
2981 fprintf_unfiltered (gdb_stdlog,
2982 "SARC: not re-resuming LWP %ld "
2983 "(has pending)\n",
dfd4cc63 2984 ptid_get_lwp (lp->ptid));
12d9289a 2985 }
25289eb2 2986 }
d6b0e80f
AC
2987 }
2988 return 0;
2989}
2990
02f3fc28 2991/* Check if we should go on and pass this event to common code.
9c02b525 2992 Return the affected lwp if we are, or NULL otherwise. */
12d9289a 2993
02f3fc28 2994static struct lwp_info *
9c02b525 2995linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2996{
2997 struct lwp_info *lp;
89a5711c 2998 int event = linux_ptrace_get_extended_event (status);
02f3fc28
PA
2999
3000 lp = find_lwp_pid (pid_to_ptid (lwpid));
3001
3002 /* Check for stop events reported by a process we didn't already
3003 know about - anything not already in our LWP list.
3004
3005 If we're expecting to receive stopped processes after
3006 fork, vfork, and clone events, then we'll just add the
3007 new one to our list and go back to waiting for the event
3008 to be reported - the stopped process might be returned
0e5bf2a8
PA
3009 from waitpid before or after the event is.
3010
3011 But note the case of a non-leader thread exec'ing after the
 3012 leader has exited and gone from our lists. The non-leader
3013 thread changes its tid to the tgid. */
3014
3015 if (WIFSTOPPED (status) && lp == NULL
89a5711c 3016 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
3017 {
3018 /* A multi-thread exec after we had seen the leader exiting. */
3019 if (debug_linux_nat)
3020 fprintf_unfiltered (gdb_stdlog,
3021 "LLW: Re-adding thread group leader LWP %d.\n",
3022 lwpid);
3023
dfd4cc63 3024 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
3025 lp->stopped = 1;
3026 lp->resumed = 1;
3027 add_thread (lp->ptid);
3028 }
3029
02f3fc28
PA
3030 if (WIFSTOPPED (status) && !lp)
3031 {
3b27ef47
PA
3032 if (debug_linux_nat)
3033 fprintf_unfiltered (gdb_stdlog,
3034 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
3035 (long) lwpid, status_to_str (status));
84636d28 3036 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3037 return NULL;
3038 }
3039
3040 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3041 our list, i.e. not part of the current process. This can happen
fd62cb89 3042 if we detach from a program we originally forked and then it
02f3fc28
PA
3043 exits. */
3044 if (!WIFSTOPPED (status) && !lp)
3045 return NULL;
3046
8817a6f2
PA
3047 /* This LWP is stopped now. (And if dead, this prevents it from
3048 ever being continued.) */
3049 lp->stopped = 1;
3050
8784d563
PA
3051 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3052 {
3053 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3054
3055 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
3056 lp->must_set_ptrace_flags = 0;
3057 }
3058
ca2163eb
PA
3059 /* Handle GNU/Linux's syscall SIGTRAPs. */
3060 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3061 {
3062 /* No longer need the sysgood bit. The ptrace event ends up
3063 recorded in lp->waitstatus if we care for it. We can carry
3064 on handling the event like a regular SIGTRAP from here
3065 on. */
3066 status = W_STOPCODE (SIGTRAP);
3067 if (linux_handle_syscall_trap (lp, 0))
3068 return NULL;
3069 }
02f3fc28 3070
ca2163eb 3071 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
3072 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3073 && linux_is_extended_waitstatus (status))
02f3fc28
PA
3074 {
3075 if (debug_linux_nat)
3076 fprintf_unfiltered (gdb_stdlog,
3077 "LLW: Handling extended status 0x%06x\n",
3078 status);
3079 if (linux_handle_extended_wait (lp, status, 0))
3080 return NULL;
3081 }
3082
3083 /* Check if the thread has exited. */
9c02b525
PA
3084 if (WIFEXITED (status) || WIFSIGNALED (status))
3085 {
3086 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 3087 {
9c02b525
PA
3088 /* If this is the main thread, we must stop all threads and
3089 verify if they are still alive. This is because in the
3090 nptl thread model on Linux 2.4, there is no signal issued
3091 for exiting LWPs other than the main thread. We only get
3092 the main thread exit signal once all child threads have
3093 already exited. If we stop all the threads and use the
3094 stop_wait_callback to check if they have exited we can
3095 determine whether this signal should be ignored or
3096 whether it means the end of the debugged application,
3097 regardless of which threading model is being used. */
3098 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
3099 {
3100 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3101 stop_and_resume_callback, NULL);
3102 }
3103
3104 if (debug_linux_nat)
3105 fprintf_unfiltered (gdb_stdlog,
3106 "LLW: %s exited.\n",
3107 target_pid_to_str (lp->ptid));
3108
3109 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
3110 {
3111 /* If there is at least one more LWP, then the exit signal
3112 was not the end of the debugged application and should be
3113 ignored. */
3114 exit_lwp (lp);
3115 return NULL;
3116 }
02f3fc28
PA
3117 }
3118
9c02b525
PA
3119 gdb_assert (lp->resumed);
3120
02f3fc28
PA
3121 if (debug_linux_nat)
3122 fprintf_unfiltered (gdb_stdlog,
9c02b525
PA
3123 "Process %ld exited\n",
3124 ptid_get_lwp (lp->ptid));
02f3fc28 3125
9c02b525
PA
3126 /* This was the last lwp in the process. Since events are
3127 serialized to GDB core, we may not be able report this one
3128 right now, but GDB core and the other target layers will want
3129 to be notified about the exit code/signal, leave the status
3130 pending for the next time we're able to report it. */
3131
 3132 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
3133 lp->signalled = 0;
3134
3135 /* Store the pending event in the waitstatus, because
3136 W_EXITCODE(0,0) == 0. */
3137 store_waitstatus (&lp->waitstatus, status);
3138 return lp;
02f3fc28
PA
3139 }
3140
3141 /* Check if the current LWP has previously exited. In the nptl
3142 thread model, LWPs other than the main thread do not issue
3143 signals when they exit so we must check whenever the thread has
3144 stopped. A similar check is made in stop_wait_callback(). */
dfd4cc63 3145 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3146 {
dfd4cc63 3147 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
d90e17a7 3148
02f3fc28
PA
3149 if (debug_linux_nat)
3150 fprintf_unfiltered (gdb_stdlog,
3151 "LLW: %s exited.\n",
3152 target_pid_to_str (lp->ptid));
3153
3154 exit_lwp (lp);
3155
3156 /* Make sure there is at least one thread running. */
d90e17a7 3157 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3158
3159 /* Discard the event. */
3160 return NULL;
3161 }
3162
3163 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3164 an attempt to stop an LWP. */
3165 if (lp->signalled
3166 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3167 {
3168 if (debug_linux_nat)
3169 fprintf_unfiltered (gdb_stdlog,
3170 "LLW: Delayed SIGSTOP caught for %s.\n",
3171 target_pid_to_str (lp->ptid));
3172
02f3fc28
PA
3173 lp->signalled = 0;
3174
25289eb2
PA
3175 if (lp->last_resume_kind != resume_stop)
3176 {
3177 /* This is a delayed SIGSTOP. */
02f3fc28 3178
8a99810d 3179 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2
PA
3180 if (debug_linux_nat)
3181 fprintf_unfiltered (gdb_stdlog,
3182 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3183 lp->step ?
3184 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3185 target_pid_to_str (lp->ptid));
02f3fc28 3186
25289eb2 3187 gdb_assert (lp->resumed);
02f3fc28 3188
25289eb2
PA
3189 /* Discard the event. */
3190 return NULL;
3191 }
02f3fc28
PA
3192 }
3193
57380f4e
DJ
3194 /* Make sure we don't report a SIGINT that we have already displayed
3195 for another thread. */
3196 if (lp->ignore_sigint
3197 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3198 {
3199 if (debug_linux_nat)
3200 fprintf_unfiltered (gdb_stdlog,
3201 "LLW: Delayed SIGINT caught for %s.\n",
3202 target_pid_to_str (lp->ptid));
3203
3204 /* This is a delayed SIGINT. */
3205 lp->ignore_sigint = 0;
3206
8a99810d 3207 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3208 if (debug_linux_nat)
3209 fprintf_unfiltered (gdb_stdlog,
3210 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3211 lp->step ?
3212 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3213 target_pid_to_str (lp->ptid));
57380f4e
DJ
3214 gdb_assert (lp->resumed);
3215
3216 /* Discard the event. */
3217 return NULL;
3218 }
3219
9c02b525
PA
3220 /* Don't report signals that GDB isn't interested in, such as
3221 signals that are neither printed nor stopped upon. Stopping all
3222 threads can be a bit time-consuming so if we want decent
3223 performance with heavily multi-threaded programs, especially when
3224 they're using a high frequency timer, we'd better avoid it if we
3225 can. */
3226 if (WIFSTOPPED (status))
3227 {
3228 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3229
3230 if (!non_stop)
3231 {
3232 /* Only do the below in all-stop, as we currently use SIGSTOP
3233 to implement target_stop (see linux_nat_stop) in
3234 non-stop. */
3235 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3236 {
3237 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3238 forwarded to the entire process group, that is, all LWPs
3239 will receive it - unless they're using CLONE_THREAD to
3240 share signals. Since we only want to report it once, we
3241 mark it as ignored for all LWPs except this one. */
3242 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3243 set_ignore_sigint, NULL);
3244 lp->ignore_sigint = 0;
3245 }
3246 else
3247 maybe_clear_ignore_sigint (lp);
3248 }
3249
3250 /* When using hardware single-step, we need to report every signal.
c9587f88
AT
 3251 Otherwise, signals in pass_mask may be short-circuited,
 3252 except for signals that might be caused by a breakpoint. */
9c02b525 3253 if (!lp->step
c9587f88
AT
3254 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3255 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3256 {
3257 linux_resume_one_lwp (lp, lp->step, signo);
3258 if (debug_linux_nat)
3259 fprintf_unfiltered (gdb_stdlog,
3260 "LLW: %s %s, %s (preempt 'handle')\n",
3261 lp->step ?
3262 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3263 target_pid_to_str (lp->ptid),
3264 (signo != GDB_SIGNAL_0
3265 ? strsignal (gdb_signal_to_host (signo))
3266 : "0"));
3267 return NULL;
3268 }
3269 }
3270
02f3fc28
PA
3271 /* An interesting event. */
3272 gdb_assert (lp);
ca2163eb 3273 lp->status = status;
9c02b525 3274 save_sigtrap (lp);
02f3fc28
PA
3275 return lp;
3276}
3277
0e5bf2a8
PA
3278/* Detect zombie thread group leaders, and "exit" them. We can't reap
3279 their exits until all other threads in the group have exited. */
3280
3281static void
3282check_zombie_leaders (void)
3283{
3284 struct inferior *inf;
3285
3286 ALL_INFERIORS (inf)
3287 {
3288 struct lwp_info *leader_lp;
3289
3290 if (inf->pid == 0)
3291 continue;
3292
3293 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3294 if (leader_lp != NULL
3295 /* Check if there are other threads in the group, as we may
3296 have raced with the inferior simply exiting. */
3297 && num_lwps (inf->pid) > 1
5f572dec 3298 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3299 {
3300 if (debug_linux_nat)
3301 fprintf_unfiltered (gdb_stdlog,
3302 "CZL: Thread group leader %d zombie "
3303 "(it exited, or another thread execd).\n",
3304 inf->pid);
3305
3306 /* A leader zombie can mean one of two things:
3307
3308 - It exited, and there's an exit status pending
3309 available, or only the leader exited (not the whole
3310 program). In the latter case, we can't waitpid the
3311 leader's exit status until all other threads are gone.
3312
3313 - There are 3 or more threads in the group, and a thread
3314 other than the leader exec'd. On an exec, the Linux
3315 kernel destroys all other threads (except the execing
3316 one) in the thread group, and resets the execing thread's
3317 tid to the tgid. No exit notification is sent for the
3318 execing thread -- from the ptracer's perspective, it
3319 appears as though the execing thread just vanishes.
3320 Until we reap all other threads except the leader and the
3321 execing thread, the leader will be zombie, and the
3322 execing thread will be in `D (disc sleep)'. As soon as
3323 all other threads are reaped, the execing thread changes
3324 its tid to the tgid, and the previous (zombie) leader
3325 vanishes, giving place to the "new" leader. We could try
3326 distinguishing the exit and exec cases, by waiting once
3327 more, and seeing if something comes out, but it doesn't
3328 sound useful. The previous leader _does_ go away, and
3329 we'll re-add the new one once we see the exec event
3330 (which is just the same as what would happen if the
3331 previous leader did exit voluntarily before some other
3332 thread execs). */
3333
3334 if (debug_linux_nat)
3335 fprintf_unfiltered (gdb_stdlog,
3336 "CZL: Thread group leader %d vanished.\n",
3337 inf->pid);
3338 exit_lwp (leader_lp);
3339 }
3340 }
3341}
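/* Editor's illustrative sketch (not part of linux-nat.c): check_zombie_leaders
   relies on linux_proc_pid_is_zombie from nat/linux-procfs.c.  A minimal,
   self-contained approximation of that /proc check is shown below; the
   function name is hypothetical and error handling is kept to a bare
   minimum.  A zombie task reports "State:\tZ (zombie)" in /proc/PID/status.  */

#include <stdio.h>
#include <string.h>

static int
sketch_pid_is_zombie (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        /* The state letter follows the "State:" tag and whitespace.  */
        const char *p = line + 6;

        while (*p == ' ' || *p == '\t')
          p++;
        zombie = (*p == 'Z');
        break;
      }

  fclose (f);
  return zombie;
}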
3342
d6b0e80f 3343static ptid_t
7feb7d06 3344linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3345 ptid_t ptid, struct target_waitstatus *ourstatus,
3346 int target_options)
d6b0e80f 3347{
fc9b8e47 3348 sigset_t prev_mask;
4b60df3d 3349 enum resume_kind last_resume_kind;
12d9289a 3350 struct lwp_info *lp;
12d9289a 3351 int status;
d6b0e80f 3352
01124a23 3353 if (debug_linux_nat)
b84876c2
PA
3354 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3355
f973ed9c
DJ
3356 /* The first time we get here after starting a new inferior, we may
3357 not have added it to the LWP list yet - this is the earliest
3358 moment at which we know its PID. */
d90e17a7 3359 if (ptid_is_pid (inferior_ptid))
f973ed9c 3360 {
27c9d204
PA
3361 /* Upgrade the main thread's ptid. */
3362 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3363 ptid_build (ptid_get_pid (inferior_ptid),
3364 ptid_get_pid (inferior_ptid), 0));
27c9d204 3365
26cb8b7c 3366 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3367 lp->resumed = 1;
3368 }
3369
12696c10 3370 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3371 block_child_signals (&prev_mask);
d6b0e80f 3372
d6b0e80f 3373 /* First check if there is a LWP with a wait status pending. */
8a99810d
PA
3374 lp = iterate_over_lwps (ptid, status_callback, NULL);
3375 if (lp != NULL)
d6b0e80f
AC
3376 {
3377 if (debug_linux_nat)
d6b0e80f
AC
3378 fprintf_unfiltered (gdb_stdlog,
3379 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3380 status_to_str (lp->status),
d6b0e80f 3381 target_pid_to_str (lp->ptid));
d6b0e80f
AC
3382 }
3383
d9d41e78 3384 if (!target_is_async_p ())
b84876c2
PA
3385 {
3386 /* Causes SIGINT to be passed on to the attached process. */
3387 set_sigint_trap ();
b84876c2 3388 }
d6b0e80f 3389
9c02b525
PA
3390 /* But if we don't find a pending event, we'll have to wait. Always
3391 pull all events out of the kernel. We'll randomly select an
3392 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3393
d90e17a7 3394 while (lp == NULL)
d6b0e80f
AC
3395 {
3396 pid_t lwpid;
3397
0e5bf2a8
PA
3398 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3399 quirks:
3400
3401 - If the thread group leader exits while other threads in the
3402 thread group still exist, waitpid(TGID, ...) hangs. That
3403 waitpid won't return an exit status until the other threads
3404 in the group are reaped.
3405
3406 - When a non-leader thread execs, that thread just vanishes
3407 without reporting an exit (so we'd hang if we waited for it
3408 explicitly in that case). The exec event is reported to
3409 the TGID pid. */
3410
3411 errno = 0;
3412 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3413 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3414 lwpid = my_waitpid (-1, &status, WNOHANG);
3415
3416 if (debug_linux_nat)
3417 fprintf_unfiltered (gdb_stdlog,
3418 "LNW: waitpid(-1, ...) returned %d, %s\n",
3419 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3420
d6b0e80f
AC
3421 if (lwpid > 0)
3422 {
d6b0e80f
AC
3423 if (debug_linux_nat)
3424 {
3425 fprintf_unfiltered (gdb_stdlog,
3426 "LLW: waitpid %ld received %s\n",
3427 (long) lwpid, status_to_str (status));
3428 }
3429
9c02b525 3430 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3431 /* Retry until nothing comes out of waitpid. A single
3432 SIGCHLD can indicate more than one child stopped. */
3433 continue;
d6b0e80f
AC
3434 }
3435
20ba1ce6
PA
3436 /* Now that we've pulled all events out of the kernel, resume
3437 LWPs that don't have an interesting event to report. */
3438 iterate_over_lwps (minus_one_ptid,
3439 resume_stopped_resumed_lwps, &minus_one_ptid);
3440
3441 /* ... and find an LWP with a status to report to the core, if
3442 any. */
9c02b525
PA
3443 lp = iterate_over_lwps (ptid, status_callback, NULL);
3444 if (lp != NULL)
3445 break;
3446
0e5bf2a8
PA
3447 /* Check for zombie thread group leaders. Those can't be reaped
3448 until all other threads in the thread group are. */
3449 check_zombie_leaders ();
d6b0e80f 3450
0e5bf2a8
PA
3451 /* If there are no resumed children left, bail. We'd be stuck
3452 forever in the sigsuspend call below otherwise. */
3453 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3454 {
3455 if (debug_linux_nat)
3456 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3457
0e5bf2a8 3458 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3459
d9d41e78 3460 if (!target_is_async_p ())
0e5bf2a8 3461 clear_sigint_trap ();
b84876c2 3462
0e5bf2a8
PA
3463 restore_child_signals_mask (&prev_mask);
3464 return minus_one_ptid;
d6b0e80f 3465 }
28736962 3466
0e5bf2a8
PA
3467 /* No interesting event to report to the core. */
3468
3469 if (target_options & TARGET_WNOHANG)
3470 {
01124a23 3471 if (debug_linux_nat)
28736962
PA
3472 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3473
0e5bf2a8 3474 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3475 restore_child_signals_mask (&prev_mask);
3476 return minus_one_ptid;
3477 }
d6b0e80f
AC
3478
3479 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3480 gdb_assert (lp == NULL);
0e5bf2a8
PA
3481
3482 /* Block until we get an event reported with SIGCHLD. */
d36bf488
DE
3483 if (debug_linux_nat)
3484 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
0e5bf2a8 3485 sigsuspend (&suspend_mask);
d6b0e80f
AC
3486 }
3487
d9d41e78 3488 if (!target_is_async_p ())
d26b5354 3489 clear_sigint_trap ();
d6b0e80f
AC
3490
3491 gdb_assert (lp);
3492
ca2163eb
PA
3493 status = lp->status;
3494 lp->status = 0;
3495
4c28f408
PA
3496 if (!non_stop)
3497 {
3498 /* Now stop all other LWPs ... */
d90e17a7 3499 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3500
3501 /* ... and wait until all of them have reported back that
3502 they're no longer running. */
d90e17a7 3503 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
9c02b525
PA
3504 }
3505
3506 /* If we're not waiting for a specific LWP, choose an event LWP from
3507 among those that have had events. Giving equal priority to all
3508 LWPs that have had events helps prevent starvation. */
3509 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3510 select_event_lwp (ptid, &lp, &status);
3511
3512 gdb_assert (lp != NULL);
3513
3514 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3515 it was a software breakpoint, and we can't reliably support the
3516 "stopped by software breakpoint" stop reason. */
3517 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3518 && !USE_SIGTRAP_SIGINFO)
9c02b525
PA
3519 {
3520 struct regcache *regcache = get_thread_regcache (lp->ptid);
3521 struct gdbarch *gdbarch = get_regcache_arch (regcache);
527a273a 3522 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3523
9c02b525
PA
3524 if (decr_pc != 0)
3525 {
3526 CORE_ADDR pc;
d6b0e80f 3527
9c02b525
PA
3528 pc = regcache_read_pc (regcache);
3529 regcache_write_pc (regcache, pc + decr_pc);
3530 }
3531 }
e3e9f5a2 3532
9c02b525
PA
3533 /* We'll need this to determine whether to report a SIGSTOP as
3534 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3535 clears it. */
3536 last_resume_kind = lp->last_resume_kind;
4b60df3d 3537
9c02b525
PA
3538 if (!non_stop)
3539 {
e3e9f5a2
PA
3540 /* In all-stop, from the core's perspective, all LWPs are now
3541 stopped until a new resume action is sent over. */
3542 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3543 }
3544 else
25289eb2 3545 {
4b60df3d 3546 resume_clear_callback (lp, NULL);
25289eb2 3547 }
d6b0e80f 3548
26ab7092 3549 if (linux_nat_status_is_event (status))
d6b0e80f 3550 {
d6b0e80f
AC
3551 if (debug_linux_nat)
3552 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3553 "LLW: trap ptid is %s.\n",
3554 target_pid_to_str (lp->ptid));
d6b0e80f 3555 }
d6b0e80f
AC
3556
3557 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3558 {
3559 *ourstatus = lp->waitstatus;
3560 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3561 }
3562 else
3563 store_waitstatus (ourstatus, status);
3564
01124a23 3565 if (debug_linux_nat)
b84876c2
PA
3566 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3567
7feb7d06 3568 restore_child_signals_mask (&prev_mask);
1e225492 3569
4b60df3d 3570 if (last_resume_kind == resume_stop
25289eb2
PA
3571 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3572 && WSTOPSIG (status) == SIGSTOP)
3573 {
3574 /* A thread that has been requested to stop by GDB with
3575 target_stop, and it stopped cleanly, so report as SIG0. The
3576 use of SIGSTOP is an implementation detail. */
a493e3e2 3577 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3578 }
3579
1e225492
JK
3580 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3581 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3582 lp->core = -1;
3583 else
2e794194 3584 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3585
f973ed9c 3586 return lp->ptid;
d6b0e80f
AC
3587}
3588
e3e9f5a2
PA
3589/* Resume LWPs that are currently stopped without any pending status
3590 to report, but are resumed from the core's perspective. */
3591
3592static int
3593resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3594{
3595 ptid_t *wait_ptid_p = data;
3596
3597 if (lp->stopped
3598 && lp->resumed
8a99810d 3599 && !lwp_status_pending_p (lp))
e3e9f5a2 3600 {
336060f3
PA
3601 struct regcache *regcache = get_thread_regcache (lp->ptid);
3602 struct gdbarch *gdbarch = get_regcache_arch (regcache);
336060f3 3603
23f238d3 3604 TRY
e3e9f5a2 3605 {
23f238d3
PA
3606 CORE_ADDR pc = regcache_read_pc (regcache);
3607 int leave_stopped = 0;
e3e9f5a2 3608
23f238d3
PA
3609 /* Don't bother if there's a breakpoint at PC that we'd hit
3610 immediately, and we're not waiting for this LWP. */
3611 if (!ptid_match (lp->ptid, *wait_ptid_p))
3612 {
3613 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3614 leave_stopped = 1;
3615 }
e3e9f5a2 3616
23f238d3
PA
3617 if (!leave_stopped)
3618 {
3619 if (debug_linux_nat)
3620 fprintf_unfiltered (gdb_stdlog,
3621 "RSRL: resuming stopped-resumed LWP %s at "
3622 "%s: step=%d\n",
3623 target_pid_to_str (lp->ptid),
3624 paddress (gdbarch, pc),
3625 lp->step);
3626
3627 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3628 }
3629 }
3630 CATCH (ex, RETURN_MASK_ERROR)
3631 {
3632 if (!check_ptrace_stopped_lwp_gone (lp))
3633 throw_exception (ex);
3634 }
3635 END_CATCH
e3e9f5a2
PA
3636 }
3637
3638 return 0;
3639}
3640
7feb7d06
PA
3641static ptid_t
3642linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3643 ptid_t ptid, struct target_waitstatus *ourstatus,
3644 int target_options)
7feb7d06
PA
3645{
3646 ptid_t event_ptid;
3647
3648 if (debug_linux_nat)
09826ec5
PA
3649 {
3650 char *options_string;
3651
3652 options_string = target_options_to_string (target_options);
3653 fprintf_unfiltered (gdb_stdlog,
3654 "linux_nat_wait: [%s], [%s]\n",
3655 target_pid_to_str (ptid),
3656 options_string);
3657 xfree (options_string);
3658 }
7feb7d06
PA
3659
3660 /* Flush the async file first. */
d9d41e78 3661 if (target_is_async_p ())
7feb7d06
PA
3662 async_file_flush ();
3663
e3e9f5a2
PA
3664 /* Resume LWPs that are currently stopped without any pending status
3665 to report, but are resumed from the core's perspective. LWPs get
3666 in this state if we find them stopping at a time we're not
3667 interested in reporting the event (target_wait on a
3668 specific process, for example, see linux_nat_wait_1), and
3669 meanwhile the event became uninteresting. Don't bother resuming
3670 LWPs we're not going to wait for if they'd stop immediately. */
3671 if (non_stop)
3672 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3673
47608cb1 3674 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3675
3676 /* If we requested any event, and something came out, assume there
3677 may be more. If we requested a specific lwp or process, also
3678 assume there may be more. */
d9d41e78 3679 if (target_is_async_p ()
6953d224
PA
3680 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3681 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3682 || !ptid_equal (ptid, minus_one_ptid)))
3683 async_file_mark ();
3684
7feb7d06
PA
3685 return event_ptid;
3686}
3687
d6b0e80f
AC
3688static int
3689kill_callback (struct lwp_info *lp, void *data)
3690{
ed731959
JK
3691 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3692
3693 errno = 0;
69ff6be5 3694 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
ed731959 3695 if (debug_linux_nat)
57745c90
PA
3696 {
3697 int save_errno = errno;
3698
3699 fprintf_unfiltered (gdb_stdlog,
3700 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3701 target_pid_to_str (lp->ptid),
3702 save_errno ? safe_strerror (save_errno) : "OK");
3703 }
ed731959
JK
3704
3705 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3706
d6b0e80f 3707 errno = 0;
dfd4cc63 3708 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
d6b0e80f 3709 if (debug_linux_nat)
57745c90
PA
3710 {
3711 int save_errno = errno;
3712
3713 fprintf_unfiltered (gdb_stdlog,
3714 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3715 target_pid_to_str (lp->ptid),
3716 save_errno ? safe_strerror (save_errno) : "OK");
3717 }
d6b0e80f
AC
3718
3719 return 0;
3720}
3721
3722static int
3723kill_wait_callback (struct lwp_info *lp, void *data)
3724{
3725 pid_t pid;
3726
3727 /* We must make sure that there are no pending events (delayed
3728 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3729 program doesn't interfere with any following debugging session. */
3730
3731 /* For cloned processes we must check both with __WCLONE and
3732 without, since the exit status of a cloned process isn't reported
3733 with __WCLONE. */
3734 if (lp->cloned)
3735 {
3736 do
3737 {
dfd4cc63 3738 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
e85a822c 3739 if (pid != (pid_t) -1)
d6b0e80f 3740 {
e85a822c
DJ
3741 if (debug_linux_nat)
3742 fprintf_unfiltered (gdb_stdlog,
3743 "KWC: wait %s received unknown.\n",
3744 target_pid_to_str (lp->ptid));
3745 /* The Linux kernel sometimes fails to kill a thread
3746 completely after PTRACE_KILL; that goes from the stop
3747 point in do_fork out to the one in
3748 get_signal_to_deliver and waits again. So kill it
3749 again. */
3750 kill_callback (lp, NULL);
d6b0e80f
AC
3751 }
3752 }
dfd4cc63 3753 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3754
3755 gdb_assert (pid == -1 && errno == ECHILD);
3756 }
3757
3758 do
3759 {
dfd4cc63 3760 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
e85a822c 3761 if (pid != (pid_t) -1)
d6b0e80f 3762 {
e85a822c
DJ
3763 if (debug_linux_nat)
3764 fprintf_unfiltered (gdb_stdlog,
3765 "KWC: wait %s received unk.\n",
3766 target_pid_to_str (lp->ptid));
3767 /* See the call to kill_callback above. */
3768 kill_callback (lp, NULL);
d6b0e80f
AC
3769 }
3770 }
dfd4cc63 3771 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3772
3773 gdb_assert (pid == -1 && errno == ECHILD);
3774 return 0;
3775}
3776
3777static void
7d85a9c0 3778linux_nat_kill (struct target_ops *ops)
d6b0e80f 3779{
f973ed9c
DJ
3780 struct target_waitstatus last;
3781 ptid_t last_ptid;
3782 int status;
d6b0e80f 3783
f973ed9c
DJ
3784 /* If we're stopped while forking and we haven't followed yet,
3785 kill the other task. We need to do this first because the
3786 parent will be sleeping if this is a vfork. */
d6b0e80f 3787
f973ed9c 3788 get_last_target_status (&last_ptid, &last);
d6b0e80f 3789
f973ed9c
DJ
3790 if (last.kind == TARGET_WAITKIND_FORKED
3791 || last.kind == TARGET_WAITKIND_VFORKED)
3792 {
dfd4cc63 3793 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
f973ed9c 3794 wait (&status);
26cb8b7c
PA
3795
3796 /* Let the arch-specific native code know this process is
3797 gone. */
dfd4cc63 3798 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
f973ed9c
DJ
3799 }
3800
3801 if (forks_exist_p ())
7feb7d06 3802 linux_fork_killall ();
f973ed9c
DJ
3803 else
3804 {
d90e17a7 3805 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3806
4c28f408
PA
3807 /* Stop all threads before killing them, since ptrace requires
3808 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3809 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3810 /* ... and wait until all of them have reported back that
3811 they're no longer running. */
d90e17a7 3812 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3813
f973ed9c 3814 /* Kill all LWPs ... */
d90e17a7 3815 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3816
3817 /* ... and wait until we've flushed all events. */
d90e17a7 3818 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3819 }
3820
3821 target_mourn_inferior ();
d6b0e80f
AC
3822}
3823
3824static void
136d6dae 3825linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3826{
26cb8b7c
PA
3827 int pid = ptid_get_pid (inferior_ptid);
3828
3829 purge_lwp_list (pid);
d6b0e80f 3830
f973ed9c 3831 if (! forks_exist_p ())
d90e17a7
PA
3832 /* Normal case, no other forks available. */
3833 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3834 else
3835 /* Multi-fork case. The current inferior_ptid has exited, but
3836 there are other viable forks to debug. Delete the exiting
3837 one and context-switch to the first available. */
3838 linux_fork_mourn_inferior ();
26cb8b7c
PA
3839
3840 /* Let the arch-specific native code know this process is gone. */
3841 linux_nat_forget_process (pid);
d6b0e80f
AC
3842}
3843
5b009018
PA
3844/* Convert a native/host siginfo object, into/from the siginfo in the
3845 layout of the inferiors' architecture. */
3846
3847static void
a5362b9a 3848siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3849{
3850 int done = 0;
3851
3852 if (linux_nat_siginfo_fixup != NULL)
3853 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3854
3855 /* If there was no callback, or the callback didn't do anything,
3856 then just do a straight memcpy. */
3857 if (!done)
3858 {
3859 if (direction == 1)
a5362b9a 3860 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3861 else
a5362b9a 3862 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3863 }
3864}
3865
9b409511 3866static enum target_xfer_status
4aa995e1
PA
3867linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3868 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3869 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3870 ULONGEST *xfered_len)
4aa995e1 3871{
4aa995e1 3872 int pid;
a5362b9a
TS
3873 siginfo_t siginfo;
3874 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3875
3876 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3877 gdb_assert (readbuf || writebuf);
3878
dfd4cc63 3879 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3880 if (pid == 0)
dfd4cc63 3881 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3882
3883 if (offset > sizeof (siginfo))
2ed4b548 3884 return TARGET_XFER_E_IO;
4aa995e1
PA
3885
3886 errno = 0;
3887 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3888 if (errno != 0)
2ed4b548 3889 return TARGET_XFER_E_IO;
4aa995e1 3890
5b009018
PA
3891 /* When GDB is built as a 64-bit application, ptrace writes into
3892 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3893 inferior with a 64-bit GDB should look the same as debugging it
3894 with a 32-bit GDB, we need to convert it. GDB core always sees
3895 the converted layout, so any read/write will have to be done
3896 post-conversion. */
3897 siginfo_fixup (&siginfo, inf_siginfo, 0);
3898
4aa995e1
PA
3899 if (offset + len > sizeof (siginfo))
3900 len = sizeof (siginfo) - offset;
3901
3902 if (readbuf != NULL)
5b009018 3903 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3904 else
3905 {
5b009018
PA
3906 memcpy (inf_siginfo + offset, writebuf, len);
3907
3908 /* Convert back to ptrace layout before flushing it out. */
3909 siginfo_fixup (&siginfo, inf_siginfo, 1);
3910
4aa995e1
PA
3911 errno = 0;
3912 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3913 if (errno != 0)
2ed4b548 3914 return TARGET_XFER_E_IO;
4aa995e1
PA
3915 }
3916
9b409511
YQ
3917 *xfered_len = len;
3918 return TARGET_XFER_OK;
4aa995e1
PA
3919}
3920
9b409511 3921static enum target_xfer_status
10d6c8cd
DJ
3922linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3923 const char *annex, gdb_byte *readbuf,
3924 const gdb_byte *writebuf,
9b409511 3925 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3926{
4aa995e1 3927 struct cleanup *old_chain;
9b409511 3928 enum target_xfer_status xfer;
d6b0e80f 3929
4aa995e1
PA
3930 if (object == TARGET_OBJECT_SIGNAL_INFO)
3931 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 3932 offset, len, xfered_len);
4aa995e1 3933
c35b1492
PA
3934 /* The target is connected but no live inferior is selected. Pass
3935 this request down to a lower stratum (e.g., the executable
3936 file). */
3937 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3938 return TARGET_XFER_EOF;
c35b1492 3939
4aa995e1
PA
3940 old_chain = save_inferior_ptid ();
3941
dfd4cc63
LM
3942 if (ptid_lwp_p (inferior_ptid))
3943 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 3944
10d6c8cd 3945 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 3946 offset, len, xfered_len);
d6b0e80f
AC
3947
3948 do_cleanups (old_chain);
3949 return xfer;
3950}
3951
3952static int
28439f5e 3953linux_thread_alive (ptid_t ptid)
d6b0e80f 3954{
8c6a60d1 3955 int err, tmp_errno;
4c28f408 3956
dfd4cc63 3957 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 3958
4c28f408
PA
3959 /* Send signal 0 instead of anything ptrace, because ptracing a
3960 running thread errors out claiming that the thread doesn't
3961 exist. */
dfd4cc63 3962 err = kill_lwp (ptid_get_lwp (ptid), 0);
8c6a60d1 3963 tmp_errno = errno;
d6b0e80f
AC
3964 if (debug_linux_nat)
3965 fprintf_unfiltered (gdb_stdlog,
4c28f408 3966 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3967 target_pid_to_str (ptid),
8c6a60d1 3968 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 3969
4c28f408 3970 if (err != 0)
d6b0e80f
AC
3971 return 0;
3972
3973 return 1;
3974}
3975
28439f5e
PA
3976static int
3977linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3978{
3979 return linux_thread_alive (ptid);
3980}
3981
d6b0e80f 3982static char *
117de6a9 3983linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3984{
3985 static char buf[64];
3986
dfd4cc63
LM
3987 if (ptid_lwp_p (ptid)
3988 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3989 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3990 {
dfd4cc63 3991 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3992 return buf;
3993 }
3994
3995 return normal_pid_to_str (ptid);
3996}
3997
4694da01 3998static char *
503a628d 3999linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01
TT
4000{
4001 int pid = ptid_get_pid (thr->ptid);
4002 long lwp = ptid_get_lwp (thr->ptid);
4003#define FORMAT "/proc/%d/task/%ld/comm"
4004 char buf[sizeof (FORMAT) + 30];
4005 FILE *comm_file;
4006 char *result = NULL;
4007
4008 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
614c279d 4009 comm_file = gdb_fopen_cloexec (buf, "r");
4694da01
TT
4010 if (comm_file)
4011 {
4012 /* Not exported by the kernel, so we define it here. */
4013#define COMM_LEN 16
4014 static char line[COMM_LEN + 1];
4015
4016 if (fgets (line, sizeof (line), comm_file))
4017 {
4018 char *nl = strchr (line, '\n');
4019
4020 if (nl)
4021 *nl = '\0';
4022 if (*line != '\0')
4023 result = line;
4024 }
4025
4026 fclose (comm_file);
4027 }
4028
4029#undef COMM_LEN
4030#undef FORMAT
4031
4032 return result;
4033}
4034
dba24537
AC
4035/* Accepts an integer PID; Returns a string representing a file that
4036 can be opened to get the symbols for the child process. */
4037
6d8fd2b7 4038static char *
8dd27370 4039linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 4040{
b4ab256d
HZ
4041 static char buf[PATH_MAX];
4042 char name[PATH_MAX];
dba24537 4043
b4ab256d
HZ
4044 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
4045 memset (buf, 0, PATH_MAX);
4046 if (readlink (name, buf, PATH_MAX - 1) <= 0)
4047 strcpy (buf, name);
dba24537 4048
b4ab256d 4049 return buf;
dba24537
AC
4050}
4051
10d6c8cd
DJ
4052/* Implement the to_xfer_partial interface for memory reads using the /proc
4053 filesystem. Because we can use a single read() call for /proc, this
4054 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4055 but it doesn't support writes. */
4056
9b409511 4057static enum target_xfer_status
10d6c8cd
DJ
4058linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4059 const char *annex, gdb_byte *readbuf,
4060 const gdb_byte *writebuf,
9b409511 4061 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 4062{
10d6c8cd
DJ
4063 LONGEST ret;
4064 int fd;
dba24537
AC
4065 char filename[64];
4066
10d6c8cd 4067 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4068 return 0;
4069
4070 /* Don't bother for one word. */
4071 if (len < 3 * sizeof (long))
9b409511 4072 return TARGET_XFER_EOF;
dba24537
AC
4073
4074 /* We could keep this file open and cache it - possibly one per
4075 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
4076 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
4077 ptid_get_pid (inferior_ptid));
614c279d 4078 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 4079 if (fd == -1)
9b409511 4080 return TARGET_XFER_EOF;
dba24537
AC
4081
4082 /* If pread64 is available, use it. It's faster if the kernel
4083 supports it (only one syscall), and it's 64-bit safe even on
4084 32-bit platforms (for instance, SPARC debugging a SPARC64
4085 application). */
4086#ifdef HAVE_PREAD64
10d6c8cd 4087 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4088#else
10d6c8cd 4089 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4090#endif
4091 ret = 0;
4092 else
4093 ret = len;
4094
4095 close (fd);
9b409511
YQ
4096
4097 if (ret == 0)
4098 return TARGET_XFER_EOF;
4099 else
4100 {
4101 *xfered_len = ret;
4102 return TARGET_XFER_OK;
4103 }
dba24537
AC
4104}
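/* Editor's illustrative sketch (not part of linux-nat.c): for contrast with
   the single pread64 on /proc/PID/mem above, this is roughly what the
   word-at-a-time PTRACE_PEEKTEXT approach looks like -- one syscall per
   word of inferior memory.  The function name is hypothetical and the
   inferior is assumed to be ptrace-stopped.  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <string.h>
#include <errno.h>

static int
sketch_peek_memory (pid_t pid, unsigned long addr, unsigned char *buf,
		    size_t len)
{
  size_t copied = 0;

  while (copied < len)
    {
      long word;
      size_t chunk;

      errno = 0;
      word = ptrace (PTRACE_PEEKTEXT, pid, (void *) (addr + copied), 0);
      if (word == -1 && errno != 0)
	return -1;

      /* Copy at most one word per iteration.  */
      chunk = len - copied < sizeof (word) ? len - copied : sizeof (word);
      memcpy (buf + copied, &word, chunk);
      copied += chunk;
    }

  return 0;
}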
4105
efcbbd14
UW
4106
4107/* Enumerate spufs IDs for process PID. */
4108static LONGEST
b55e14c7 4109spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 4110{
f5656ead 4111 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
4112 LONGEST pos = 0;
4113 LONGEST written = 0;
4114 char path[128];
4115 DIR *dir;
4116 struct dirent *entry;
4117
4118 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4119 dir = opendir (path);
4120 if (!dir)
4121 return -1;
4122
4123 rewinddir (dir);
4124 while ((entry = readdir (dir)) != NULL)
4125 {
4126 struct stat st;
4127 struct statfs stfs;
4128 int fd;
4129
4130 fd = atoi (entry->d_name);
4131 if (!fd)
4132 continue;
4133
4134 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4135 if (stat (path, &st) != 0)
4136 continue;
4137 if (!S_ISDIR (st.st_mode))
4138 continue;
4139
4140 if (statfs (path, &stfs) != 0)
4141 continue;
4142 if (stfs.f_type != SPUFS_MAGIC)
4143 continue;
4144
4145 if (pos >= offset && pos + 4 <= offset + len)
4146 {
4147 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4148 written += 4;
4149 }
4150 pos += 4;
4151 }
4152
4153 closedir (dir);
4154 return written;
4155}
4156
4157/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4158 object type, using the /proc file system. */
9b409511
YQ
4159
4160static enum target_xfer_status
efcbbd14
UW
4161linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4162 const char *annex, gdb_byte *readbuf,
4163 const gdb_byte *writebuf,
9b409511 4164 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
4165{
4166 char buf[128];
4167 int fd = 0;
4168 int ret = -1;
dfd4cc63 4169 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
4170
4171 if (!annex)
4172 {
4173 if (!readbuf)
2ed4b548 4174 return TARGET_XFER_E_IO;
efcbbd14 4175 else
9b409511
YQ
4176 {
4177 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4178
4179 if (l < 0)
4180 return TARGET_XFER_E_IO;
4181 else if (l == 0)
4182 return TARGET_XFER_EOF;
4183 else
4184 {
4185 *xfered_len = (ULONGEST) l;
4186 return TARGET_XFER_OK;
4187 }
4188 }
efcbbd14
UW
4189 }
4190
4191 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 4192 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 4193 if (fd <= 0)
2ed4b548 4194 return TARGET_XFER_E_IO;
efcbbd14
UW
4195
4196 if (offset != 0
4197 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4198 {
4199 close (fd);
9b409511 4200 return TARGET_XFER_EOF;
efcbbd14
UW
4201 }
4202
4203 if (writebuf)
4204 ret = write (fd, writebuf, (size_t) len);
4205 else if (readbuf)
4206 ret = read (fd, readbuf, (size_t) len);
4207
4208 close (fd);
9b409511
YQ
4209
4210 if (ret < 0)
4211 return TARGET_XFER_E_IO;
4212 else if (ret == 0)
4213 return TARGET_XFER_EOF;
4214 else
4215 {
4216 *xfered_len = (ULONGEST) ret;
4217 return TARGET_XFER_OK;
4218 }
efcbbd14
UW
4219}
4220
4221
dba24537
AC
4222/* Parse LINE as a signal set and add its set bits to SIGS. */
4223
4224static void
4225add_line_to_sigset (const char *line, sigset_t *sigs)
4226{
4227 int len = strlen (line) - 1;
4228 const char *p;
4229 int signum;
4230
4231 if (line[len] != '\n')
8a3fe4f8 4232 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4233
4234 p = line;
4235 signum = len * 4;
4236 while (len-- > 0)
4237 {
4238 int digit;
4239
4240 if (*p >= '0' && *p <= '9')
4241 digit = *p - '0';
4242 else if (*p >= 'a' && *p <= 'f')
4243 digit = *p - 'a' + 10;
4244 else
8a3fe4f8 4245 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4246
4247 signum -= 4;
4248
4249 if (digit & 1)
4250 sigaddset (sigs, signum + 1);
4251 if (digit & 2)
4252 sigaddset (sigs, signum + 2);
4253 if (digit & 4)
4254 sigaddset (sigs, signum + 3);
4255 if (digit & 8)
4256 sigaddset (sigs, signum + 4);
4257
4258 p++;
4259 }
4260}
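/* Editor's worked example (illustrative only): for the 16-digit mask line
   "0000000000004001\n", LEN is 16 and SIGNUM starts at 64; the trailing '1'
   digit is processed when SIGNUM has counted down to 0 and adds signal 1
   (SIGHUP), while the '4' digit is processed at SIGNUM == 12 and adds
   signal 15 (SIGTERM).  */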
4261
4262/* Find process PID's pending signals from /proc/pid/status and set
4263 SIGS to match. */
4264
4265void
3e43a32a
MS
4266linux_proc_pending_signals (int pid, sigset_t *pending,
4267 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4268{
4269 FILE *procfile;
d8d2a3ee 4270 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 4271 struct cleanup *cleanup;
dba24537
AC
4272
4273 sigemptyset (pending);
4274 sigemptyset (blocked);
4275 sigemptyset (ignored);
cde33bf1 4276 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 4277 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4278 if (procfile == NULL)
8a3fe4f8 4279 error (_("Could not open %s"), fname);
7c8a8b04 4280 cleanup = make_cleanup_fclose (procfile);
dba24537 4281
d8d2a3ee 4282 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
4283 {
4284 /* Normal queued signals are on the SigPnd line in the status
4285 file. However, 2.6 kernels also have a "shared" pending
4286 queue for delivering signals to a thread group, so check for
4287 a ShdPnd line also.
4288
4289 Unfortunately some Red Hat kernels include the shared pending
4290 queue but not the ShdPnd status field. */
4291
61012eef 4292 if (startswith (buffer, "SigPnd:\t"))
dba24537 4293 add_line_to_sigset (buffer + 8, pending);
61012eef 4294 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4295 add_line_to_sigset (buffer + 8, pending);
61012eef 4296 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4297 add_line_to_sigset (buffer + 8, blocked);
61012eef 4298 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4299 add_line_to_sigset (buffer + 8, ignored);
4300 }
4301
7c8a8b04 4302 do_cleanups (cleanup);
dba24537
AC
4303}
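/* Editor's illustrative usage sketch (hypothetical caller, assuming the
   inferior's PID is already known): inspect the sets that
   linux_proc_pending_signals fills in.  */

#include <signal.h>
#include <stdio.h>

static void
sketch_show_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  if (sigismember (&pending, SIGINT))
    printf ("pid %d has SIGINT pending\n", pid);
  if (sigismember (&blocked, SIGINT))
    printf ("pid %d has SIGINT blocked\n", pid);
  if (sigismember (&ignored, SIGINT))
    printf ("pid %d ignores SIGINT\n", pid);
}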
4304
9b409511 4305static enum target_xfer_status
07e059b5 4306linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4307 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4308 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4309 ULONGEST *xfered_len)
07e059b5 4310{
07e059b5
VP
4311 gdb_assert (object == TARGET_OBJECT_OSDATA);
4312
9b409511
YQ
4313 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4314 if (*xfered_len == 0)
4315 return TARGET_XFER_EOF;
4316 else
4317 return TARGET_XFER_OK;
07e059b5
VP
4318}
4319
9b409511 4320static enum target_xfer_status
10d6c8cd
DJ
4321linux_xfer_partial (struct target_ops *ops, enum target_object object,
4322 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4323 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4324 ULONGEST *xfered_len)
10d6c8cd 4325{
9b409511 4326 enum target_xfer_status xfer;
10d6c8cd
DJ
4327
4328 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4329 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4330 offset, len, xfered_len);
10d6c8cd 4331
07e059b5
VP
4332 if (object == TARGET_OBJECT_OSDATA)
4333 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4334 offset, len, xfered_len);
07e059b5 4335
efcbbd14
UW
4336 if (object == TARGET_OBJECT_SPU)
4337 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4338 offset, len, xfered_len);
efcbbd14 4339
8f313923
JK
4340 /* GDB calculates all the addresses in the possibly larger width of the address.
4341 Address width needs to be masked before its final use - either by
4342 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4343
4344 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4345
4346 if (object == TARGET_OBJECT_MEMORY)
4347 {
f5656ead 4348 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4349
4350 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4351 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4352 }
4353
10d6c8cd 4354 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4355 offset, len, xfered_len);
4356 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4357 return xfer;
4358
4359 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4360 offset, len, xfered_len);
10d6c8cd
DJ
4361}
4362
5808517f
YQ
4363static void
4364cleanup_target_stop (void *arg)
4365{
4366 ptid_t *ptid = (ptid_t *) arg;
4367
4368 gdb_assert (arg != NULL);
4369
4370 /* Unpause all */
a493e3e2 4371 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4372}
4373
4374static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4375linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4376 const char *strid)
5808517f
YQ
4377{
4378 char s[IPA_CMD_BUF_SIZE];
4379 struct cleanup *old_chain;
4380 int pid = ptid_get_pid (inferior_ptid);
4381 VEC(static_tracepoint_marker_p) *markers = NULL;
4382 struct static_tracepoint_marker *marker = NULL;
4383 char *p = s;
4384 ptid_t ptid = ptid_build (pid, 0, 0);
4385
4386 /* Pause all */
4387 target_stop (ptid);
4388
4389 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4390 s[sizeof ("qTfSTM")] = 0;
4391
42476b70 4392 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4393
4394 old_chain = make_cleanup (free_current_marker, &marker);
4395 make_cleanup (cleanup_target_stop, &ptid);
4396
4397 while (*p++ == 'm')
4398 {
4399 if (marker == NULL)
4400 marker = XCNEW (struct static_tracepoint_marker);
4401
4402 do
4403 {
4404 parse_static_tracepoint_marker_definition (p, &p, marker);
4405
4406 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4407 {
4408 VEC_safe_push (static_tracepoint_marker_p,
4409 markers, marker);
4410 marker = NULL;
4411 }
4412 else
4413 {
4414 release_static_tracepoint_marker (marker);
4415 memset (marker, 0, sizeof (*marker));
4416 }
4417 }
4418 while (*p++ == ','); /* comma-separated list */
4419
4420 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4421 s[sizeof ("qTsSTM")] = 0;
42476b70 4422 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4423 p = s;
4424 }
4425
4426 do_cleanups (old_chain);
4427
4428 return markers;
4429}
4430
e9efe249 4431/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4432 it with local methods. */
4433
910122bf
UW
4434static void
4435linux_target_install_ops (struct target_ops *t)
10d6c8cd 4436{
6d8fd2b7 4437 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4438 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4439 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4440 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4441 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4442 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4443 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4444 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4445 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4446 t->to_post_attach = linux_child_post_attach;
4447 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4448
4449 super_xfer_partial = t->to_xfer_partial;
4450 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4451
4452 t->to_static_tracepoint_markers_by_strid
4453 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4454}
4455
4456struct target_ops *
4457linux_target (void)
4458{
4459 struct target_ops *t;
4460
4461 t = inf_ptrace_target ();
4462 linux_target_install_ops (t);
4463
4464 return t;
4465}
4466
4467struct target_ops *
7714d83a 4468linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4469{
4470 struct target_ops *t;
4471
4472 t = inf_ptrace_trad_target (register_u_offset);
4473 linux_target_install_ops (t);
10d6c8cd 4474
10d6c8cd
DJ
4475 return t;
4476}
4477
b84876c2
PA
4478/* target_is_async_p implementation. */
4479
4480static int
6a109b6b 4481linux_nat_is_async_p (struct target_ops *ops)
b84876c2 4482{
198297aa 4483 return linux_is_async_p ();
b84876c2
PA
4484}
4485
4486/* target_can_async_p implementation. */
4487
4488static int
6a109b6b 4489linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4490{
4491 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4492 it explicitly with the "set target-async" command.
b84876c2 4493 Someday, linux will always be async. */
3dd5b83d 4494 return target_async_permitted;
b84876c2
PA
4495}
4496
9908b566 4497static int
2a9a2795 4498linux_nat_supports_non_stop (struct target_ops *self)
9908b566
VP
4499{
4500 return 1;
4501}
4502
d90e17a7
PA
4503/* True if we want to support multi-process. To be removed when GDB
4504 supports multi-exec. */
4505
2277426b 4506int linux_multi_process = 1;
d90e17a7
PA
4507
4508static int
86ce2668 4509linux_nat_supports_multi_process (struct target_ops *self)
d90e17a7
PA
4510{
4511 return linux_multi_process;
4512}
4513
03583c20 4514static int
2bfc0540 4515linux_nat_supports_disable_randomization (struct target_ops *self)
03583c20
UW
4516{
4517#ifdef HAVE_PERSONALITY
4518 return 1;
4519#else
4520 return 0;
4521#endif
4522}
4523
b84876c2
PA
4524static int async_terminal_is_ours = 1;
4525
4d4ca2a1
DE
4526/* target_terminal_inferior implementation.
4527
4528 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4529
4530static void
d2f640d4 4531linux_nat_terminal_inferior (struct target_ops *self)
b84876c2 4532{
198297aa
PA
4533 /* Like target_terminal_inferior, use target_can_async_p, not
4534 target_is_async_p, since at this point the target is not async
4535 yet. If it can async, then we know it will become async prior to
4536 resume. */
4537 if (!target_can_async_p ())
b84876c2
PA
4538 {
4539 /* Async mode is disabled. */
d6b64346 4540 child_terminal_inferior (self);
b84876c2
PA
4541 return;
4542 }
4543
d6b64346 4544 child_terminal_inferior (self);
b84876c2 4545
d9d2d8b6 4546 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4547 if (!async_terminal_is_ours)
4548 return;
4549
4550 delete_file_handler (input_fd);
4551 async_terminal_is_ours = 0;
4552 set_sigint_trap ();
4553}
4554
4d4ca2a1
DE
4555/* target_terminal_ours implementation.
4556
4557 This is a wrapper around child_terminal_ours to add async support (and
4558 implement the target_terminal_ours vs target_terminal_ours_for_output
4559 distinction). child_terminal_ours is currently no different than
4560 child_terminal_ours_for_output.
4561 We leave target_terminal_ours_for_output alone, leaving it to
4562 child_terminal_ours_for_output. */
b84876c2 4563
2c0b251b 4564static void
e3594fd1 4565linux_nat_terminal_ours (struct target_ops *self)
b84876c2 4566{
b84876c2
PA
4567 /* GDB should never give the terminal to the inferior if the
4568 inferior is running in the background (run&, continue&, etc.),
4569 but claiming it sure should. */
d6b64346 4570 child_terminal_ours (self);
b84876c2 4571
b84876c2
PA
4572 if (async_terminal_is_ours)
4573 return;
4574
4575 clear_sigint_trap ();
4576 add_file_handler (input_fd, stdin_event_handler, 0);
4577 async_terminal_is_ours = 1;
4578}
4579
4580static void (*async_client_callback) (enum inferior_event_type event_type,
4581 void *context);
4582static void *async_client_context;
4583
7feb7d06
PA
4584/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4585 so we notice when any child changes state, and notify the
4586 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4587 above to wait for the arrival of a SIGCHLD. */
4588
b84876c2 4589static void
7feb7d06 4590sigchld_handler (int signo)
b84876c2 4591{
7feb7d06
PA
4592 int old_errno = errno;
4593
01124a23
DE
4594 if (debug_linux_nat)
4595 ui_file_write_async_safe (gdb_stdlog,
4596 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4597
4598 if (signo == SIGCHLD
4599 && linux_nat_event_pipe[0] != -1)
4600 async_file_mark (); /* Let the event loop know that there are
4601 events to handle. */
4602
4603 errno = old_errno;
4604}
4605
4606/* Callback registered with the target events file descriptor. */
4607
4608static void
4609handle_target_event (int error, gdb_client_data client_data)
4610{
4611 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4612}
4613
4614/* Create/destroy the target events pipe. Returns previous state. */
4615
4616static int
4617linux_async_pipe (int enable)
4618{
198297aa 4619 int previous = linux_is_async_p ();
7feb7d06
PA
4620
4621 if (previous != enable)
4622 {
4623 sigset_t prev_mask;
4624
12696c10
PA
4625 /* Block child signals while we create/destroy the pipe, as
4626 their handler writes to it. */
7feb7d06
PA
4627 block_child_signals (&prev_mask);
4628
4629 if (enable)
4630 {
614c279d 4631 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4632 internal_error (__FILE__, __LINE__,
4633 "creating event pipe failed.");
4634
4635 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4636 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4637 }
4638 else
4639 {
4640 close (linux_nat_event_pipe[0]);
4641 close (linux_nat_event_pipe[1]);
4642 linux_nat_event_pipe[0] = -1;
4643 linux_nat_event_pipe[1] = -1;
4644 }
4645
4646 restore_child_signals_mask (&prev_mask);
4647 }
4648
4649 return previous;
b84876c2
PA
4650}
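/* Editor's illustrative sketch (not GDB's actual implementation): the event
   pipe managed above is the classic "self-pipe trick".  A minimal standalone
   version looks like this -- the signal handler writes one byte to the pipe,
   and the event loop watches the read end for readability.  The names below
   are hypothetical.  */

#include <unistd.h>
#include <errno.h>

static int sketch_event_pipe[2] = { -1, -1 };

static void
sketch_sigchld_handler (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe; a full pipe just means a wakeup is
     already pending, so a failed write can be ignored here.  */
  if (sketch_event_pipe[1] != -1)
    (void) write (sketch_event_pipe[1], "+", 1);

  errno = old_errno;
}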
4651
4652/* target_async implementation. */
4653
4654static void
6a109b6b
TT
4655linux_nat_async (struct target_ops *ops,
4656 void (*callback) (enum inferior_event_type event_type,
4657 void *context),
4658 void *context)
b84876c2 4659{
b84876c2
PA
4660 if (callback != NULL)
4661 {
4662 async_client_callback = callback;
4663 async_client_context = context;
7feb7d06
PA
4664 if (!linux_async_pipe (1))
4665 {
4666 add_file_handler (linux_nat_event_pipe[0],
4667 handle_target_event, NULL);
4668 /* There may be pending events to handle. Tell the event loop
4669 to poll them. */
4670 async_file_mark ();
4671 }
b84876c2
PA
4672 }
4673 else
4674 {
4675 async_client_callback = callback;
4676 async_client_context = context;
b84876c2 4677 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4678 linux_async_pipe (0);
b84876c2
PA
4679 }
4680 return;
4681}
4682
a493e3e2 4683/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4684 event came out. */
4685
4c28f408 4686static int
252fbfc8 4687linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4688{
d90e17a7 4689 if (!lwp->stopped)
252fbfc8 4690 {
d90e17a7
PA
4691 if (debug_linux_nat)
4692 fprintf_unfiltered (gdb_stdlog,
4693 "LNSL: running -> suspending %s\n",
4694 target_pid_to_str (lwp->ptid));
252fbfc8 4695
252fbfc8 4696
25289eb2
PA
4697 if (lwp->last_resume_kind == resume_stop)
4698 {
4699 if (debug_linux_nat)
4700 fprintf_unfiltered (gdb_stdlog,
4701 "linux-nat: already stopping LWP %ld at "
4702 "GDB's request\n",
4703 ptid_get_lwp (lwp->ptid));
4704 return 0;
4705 }
252fbfc8 4706
25289eb2
PA
4707 stop_callback (lwp, NULL);
4708 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4709 }
4710 else
4711 {
4712 /* Already known to be stopped; do nothing. */
252fbfc8 4713
d90e17a7
PA
4714 if (debug_linux_nat)
4715 {
e09875d4 4716 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4717 fprintf_unfiltered (gdb_stdlog,
4718 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4719 target_pid_to_str (lwp->ptid));
4720 else
3e43a32a
MS
4721 fprintf_unfiltered (gdb_stdlog,
4722 "LNSL: already stopped/no "
4723 "stop_requested yet %s\n",
d90e17a7 4724 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4725 }
4726 }
4c28f408
PA
4727 return 0;
4728}
4729
4730static void
1eab8a48 4731linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408
PA
4732{
4733 if (non_stop)
d90e17a7 4734 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408 4735 else
1eab8a48 4736 linux_ops->to_stop (linux_ops, ptid);
4c28f408
PA
4737}
4738
d90e17a7 4739static void
de90e03d 4740linux_nat_close (struct target_ops *self)
d90e17a7
PA
4741{
4742 /* Unregister from the event loop. */
9debeba0
DE
4743 if (linux_nat_is_async_p (self))
4744 linux_nat_async (self, NULL, NULL);
d90e17a7 4745
d90e17a7 4746 if (linux_ops->to_close)
de90e03d 4747 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4748
4749 super_close (self);
d90e17a7
PA
4750}
4751
c0694254
PA
4752/* When requests are passed down from the linux-nat layer to the
4753 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4754 used. The address space pointer is stored in the inferior object,
4755 but the common code that is passed such ptid can't tell whether
4756 lwpid is a "main" process id or not (it assumes so). We reverse
4757 look up the "main" process id from the lwp here. */
4758
70221824 4759static struct address_space *
c0694254
PA
4760linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4761{
4762 struct lwp_info *lwp;
4763 struct inferior *inf;
4764 int pid;
4765
dfd4cc63 4766 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4767 {
4768 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4769 tgid. */
4770 lwp = find_lwp_pid (ptid);
dfd4cc63 4771 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4772 }
4773 else
4774 {
4775 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4776 pid = ptid_get_pid (ptid);
c0694254
PA
4777 }
4778
4779 inf = find_inferior_pid (pid);
4780 gdb_assert (inf != NULL);
4781 return inf->aspace;
4782}
4783
dc146f7c
VP
4784/* Return the cached value of the processor core for thread PTID. */
4785
70221824 4786static int
dc146f7c
VP
4787linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4788{
4789 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4790
dc146f7c
VP
4791 if (info)
4792 return info->core;
4793 return -1;
4794}
4795
f973ed9c
DJ
4796void
4797linux_nat_add_target (struct target_ops *t)
4798{
f973ed9c
DJ
4799 /* Save the provided single-threaded target. We save this in a separate
4800 variable because another target we've inherited from (e.g. inf-ptrace)
4801 may have saved a pointer to T; we want to use it for the final
4802 process stratum target. */
4803 linux_ops_saved = *t;
4804 linux_ops = &linux_ops_saved;
4805
4806 /* Override some methods for multithreading. */
b84876c2 4807 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4808 t->to_attach = linux_nat_attach;
4809 t->to_detach = linux_nat_detach;
4810 t->to_resume = linux_nat_resume;
4811 t->to_wait = linux_nat_wait;
2455069d 4812 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4813 t->to_xfer_partial = linux_nat_xfer_partial;
4814 t->to_kill = linux_nat_kill;
4815 t->to_mourn_inferior = linux_nat_mourn_inferior;
4816 t->to_thread_alive = linux_nat_thread_alive;
4817 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4818 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4819 t->to_has_thread_control = tc_schedlock;
c0694254 4820 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4821 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4822 t->to_stopped_data_address = linux_nat_stopped_data_address;
faf09f01
PA
4823 t->to_stopped_by_sw_breakpoint = linux_nat_stopped_by_sw_breakpoint;
4824 t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
4825 t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
4826 t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
f973ed9c 4827
b84876c2
PA
4828 t->to_can_async_p = linux_nat_can_async_p;
4829 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4830 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 4831 t->to_async = linux_nat_async;
b84876c2
PA
4832 t->to_terminal_inferior = linux_nat_terminal_inferior;
4833 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4834
4835 super_close = t->to_close;
d90e17a7 4836 t->to_close = linux_nat_close;
b84876c2 4837
4c28f408
PA
4838 /* Methods for non-stop support. */
4839 t->to_stop = linux_nat_stop;
4840
d90e17a7
PA
4841 t->to_supports_multi_process = linux_nat_supports_multi_process;
4842
03583c20
UW
4843 t->to_supports_disable_randomization
4844 = linux_nat_supports_disable_randomization;
4845
dc146f7c
VP
4846 t->to_core_of_thread = linux_nat_core_of_thread;
4847
f973ed9c
DJ
4848 /* We don't change the stratum; this target will sit at
4849 process_stratum and thread_db will set at thread_stratum. This
4850 is a little strange, since this is a multi-threaded-capable
4851 target, but we want to be on the stack below thread_db, and we
4852 also want to be used for single-threaded processes. */
4853
4854 add_target (t);
f973ed9c
DJ
4855}
4856
9f0bdab8
DJ
4857/* Register a method to call whenever a new thread is attached. */
4858void
7b50312a
PA
4859linux_nat_set_new_thread (struct target_ops *t,
4860 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4861{
4862 /* Save the pointer. We only support a single registered instance
4863 of the GNU/Linux native target, so we do not need to map this to
4864 T. */
4865 linux_nat_new_thread = new_thread;
4866}
4867
26cb8b7c
PA
4868/* See declaration in linux-nat.h. */
4869
4870void
4871linux_nat_set_new_fork (struct target_ops *t,
4872 linux_nat_new_fork_ftype *new_fork)
4873{
4874 /* Save the pointer. */
4875 linux_nat_new_fork = new_fork;
4876}
4877
4878/* See declaration in linux-nat.h. */
4879
4880void
4881linux_nat_set_forget_process (struct target_ops *t,
4882 linux_nat_forget_process_ftype *fn)
4883{
4884 /* Save the pointer. */
4885 linux_nat_forget_process_hook = fn;
4886}
4887
4888/* See declaration in linux-nat.h. */
4889
4890void
4891linux_nat_forget_process (pid_t pid)
4892{
4893 if (linux_nat_forget_process_hook != NULL)
4894 linux_nat_forget_process_hook (pid);
4895}
4896
5b009018
PA
4897/* Register a method that converts a siginfo object between the layout
4898 that ptrace returns, and the layout in the architecture of the
4899 inferior. */
4900void
4901linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4902 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4903 gdb_byte *,
4904 int))
4905{
4906 /* Save the pointer. */
4907 linux_nat_siginfo_fixup = siginfo_fixup;
4908}
4909
7b50312a
PA
4910/* Register a method to call prior to resuming a thread. */
4911
4912void
4913linux_nat_set_prepare_to_resume (struct target_ops *t,
4914 void (*prepare_to_resume) (struct lwp_info *))
4915{
4916 /* Save the pointer. */
4917 linux_nat_prepare_to_resume = prepare_to_resume;
4918}
4919
f865ee35
JK
4920/* See linux-nat.h. */
4921
4922int
4923linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4924{
da559b09 4925 int pid;
9f0bdab8 4926
dfd4cc63 4927 pid = ptid_get_lwp (ptid);
da559b09 4928 if (pid == 0)
dfd4cc63 4929 pid = ptid_get_pid (ptid);
f865ee35 4930
da559b09
JK
4931 errno = 0;
4932 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4933 if (errno != 0)
4934 {
4935 memset (siginfo, 0, sizeof (*siginfo));
4936 return 0;
4937 }
f865ee35 4938 return 1;
9f0bdab8
DJ
4939}
4940
7b669087
GB
4941/* See nat/linux-nat.h. */
4942
4943ptid_t
4944current_lwp_ptid (void)
4945{
4946 gdb_assert (ptid_lwp_p (inferior_ptid));
4947 return inferior_ptid;
4948}
4949
2c0b251b
PA
4950/* Provide a prototype to silence -Wmissing-prototypes. */
4951extern initialize_file_ftype _initialize_linux_nat;
4952
d6b0e80f
AC
4953void
4954_initialize_linux_nat (void)
4955{
ccce17b0
YQ
4956 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4957 &debug_linux_nat, _("\
b84876c2
PA
4958Set debugging of GNU/Linux lwp module."), _("\
4959Show debugging of GNU/Linux lwp module."), _("\
4960Enables printf debugging output."),
ccce17b0
YQ
4961 NULL,
4962 show_debug_linux_nat,
4963 &setdebuglist, &showdebuglist);
b84876c2 4964
b84876c2 4965 /* Save this mask as the default. */
d6b0e80f
AC
4966 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4967
7feb7d06
PA
4968 /* Install a SIGCHLD handler. */
4969 sigchld_action.sa_handler = sigchld_handler;
4970 sigemptyset (&sigchld_action.sa_mask);
4971 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4972
4973 /* Make it the default. */
7feb7d06 4974 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4975
4976 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4977 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4978 sigdelset (&suspend_mask, SIGCHLD);
4979
7feb7d06 4980 sigemptyset (&blocked_mask);
8009206a
TT
4981
4982 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
4983 support read-only process state. */
4984 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
4985 | PTRACE_O_TRACEVFORKDONE
4986 | PTRACE_O_TRACEVFORK
4987 | PTRACE_O_TRACEFORK
4988 | PTRACE_O_TRACEEXEC);
d6b0e80f
AC
4989}
4990\f
4991
4992/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4993 the GNU/Linux Threads library and therefore doesn't really belong
4994 here. */
4995
4996/* Read variable NAME in the target and return its value if found.
4997 Otherwise return zero. It is assumed that the type of the variable
4998 is `int'. */
4999
5000static int
5001get_signo (const char *name)
5002{
3b7344d5 5003 struct bound_minimal_symbol ms;
d6b0e80f
AC
5004 int signo;
5005
5006 ms = lookup_minimal_symbol (name, NULL, NULL);
3b7344d5 5007 if (ms.minsym == NULL)
d6b0e80f
AC
5008 return 0;
5009
77e371c0 5010 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5011 sizeof (signo)) != 0)
5012 return 0;
5013
5014 return signo;
5015}
5016
5017/* Return the set of signals used by the threads library in *SET. */
5018
5019void
5020lin_thread_get_thread_signals (sigset_t *set)
5021{
5022 struct sigaction action;
5023 int restart, cancel;
5024
b84876c2 5025 sigemptyset (&blocked_mask);
d6b0e80f
AC
5026 sigemptyset (set);
5027
5028 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5029 cancel = get_signo ("__pthread_sig_cancel");
5030
5031 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5032 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5033 not provide any way for the debugger to query the signal numbers -
5034 fortunately they don't change! */
5035
d6b0e80f 5036 if (restart == 0)
17fbb0bd 5037 restart = __SIGRTMIN;
d6b0e80f 5038
d6b0e80f 5039 if (cancel == 0)
17fbb0bd 5040 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5041
5042 sigaddset (set, restart);
5043 sigaddset (set, cancel);
5044
5045 /* The GNU/Linux Threads library makes terminating threads send a
5046 special "cancel" signal instead of SIGCHLD. Make sure we catch
5047 those (to prevent them from terminating GDB itself, which is
5048 likely to be their default action) and treat them the same way as
5049 SIGCHLD. */
5050
5051 action.sa_handler = sigchld_handler;
5052 sigemptyset (&action.sa_mask);
58aecb61 5053 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5054 sigaction (cancel, &action, NULL);
5055
5056 /* We block the "cancel" signal throughout this code ... */
5057 sigaddset (&blocked_mask, cancel);
5058 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5059
5060 /* ... except during a sigsuspend. */
5061 sigdelset (&suspend_mask, cancel);
5062}