/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
#include "linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good
enough.  Prior to version 2.4, Linux can wait either for an event in the
main thread, or for events in secondary threads, but not for both at the
same time.  (2.4 has the __WALL flag.)  So, if we use blocking waitpid,
we might miss an event.  The solution is to use non-blocking waitpid,
together with sigsuspend.  First, we use non-blocking waitpid to get an
event in the main process, if any.  Second, we use non-blocking waitpid
with the __WCLONE flag to check for events in cloned processes.  If
nothing is found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD
arrives, it means something happened to a child process -- and SIGCHLD
will be delivered both for events in the main debugged process and in
cloned processes.  As soon as we know there's an event, we get back to
calling non-blocking waitpid with and without __WCLONE.

Note that SIGCHLD should be blocked between the waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it is blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop selects/polls on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to be
delivered, so that we can intercept it.  SIGSTOP's advantage is that it
cannot be blocked.  A disadvantage is that it is not a real-time signal,
so it can only be queued once; we do not keep track of other sources of
SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we
can't use them, because they have special behavior when the signal is
generated, not when it is delivered.  SIGCONT resumes the entire thread
group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we always
intercept and cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still need some support for SIGSTOP,
since PTRACE_ATTACH generates it, and there are races with trying to
find a signal that is not blocked.  */
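
/* For illustration only: a minimal sketch of the sync-mode strategy
   described above.  The helper name is hypothetical and this code is
   not part of this target's implementation; it assumes SIGCHLD is
   blocked everywhere except inside sigsuspend (see suspend_mask
   below).  */
#if 0
static int
wait_for_any_child_sketch (int *statusp)
{
  for (;;)
    {
      int pid;

      /* Poll the main process and regular children without blocking.  */
      pid = waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
	return pid;

      /* Poll cloned children too; pre-2.4 kernels have no __WALL, so
	 a separate non-blocking call with __WCLONE is needed.  */
      pid = waitpid (-1, statusp, __WCLONE | WNOHANG);
      if (pid > 0)
	return pid;

      /* Nothing pending: sleep until SIGCHLD arrives.  A SIGCHLD that
	 was delivered while blocked is already pending, so sigsuspend
	 returns immediately and we poll again.  */
      sigsuspend (&suspend_mask);
    }
}
#endif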

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
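
/* As an illustrative sketch (not code from this file), a waitpid
   status can be classified as a PTRACE_SYSCALL stop versus an
   ordinary SIGTRAP stop like this, where the two handler names are
   hypothetical:

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
       handle_syscall_stop ();
     else if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
       handle_sigtrap ();
*/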
171
10d6c8cd
DJ
172/* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174static struct target_ops *linux_ops;
f973ed9c 175static struct target_ops linux_ops_saved;
10d6c8cd 176
9f0bdab8 177/* The method to call, if any, when a new thread is attached. */
7b50312a
PA
178static void (*linux_nat_new_thread) (struct lwp_info *);
179
180/* Hook to call prior to resuming a thread. */
181static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
9f0bdab8 182
5b009018
PA
183/* The method to call, if any, when the siginfo object needs to be
184 converted between the layout returned by ptrace, and the layout in
185 the architecture of the inferior. */
186static int (*linux_nat_siginfo_fixup) (struct siginfo *,
187 gdb_byte *,
188 int);
189
ac264b3b
MS
190/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
191 Called by our to_xfer_partial. */
192static LONGEST (*super_xfer_partial) (struct target_ops *,
193 enum target_object,
194 const char *, gdb_byte *,
195 const gdb_byte *,
10d6c8cd
DJ
196 ULONGEST, LONGEST);
197
d6b0e80f 198static int debug_linux_nat;
920d2a44
AC
199static void
200show_debug_linux_nat (struct ui_file *file, int from_tty,
201 struct cmd_list_element *c, const char *value)
202{
203 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
204 value);
205}
d6b0e80f 206
ae087d01
DJ
207struct simple_pid_list
208{
209 int pid;
3d799a95 210 int status;
ae087d01
DJ
211 struct simple_pid_list *next;
212};
213struct simple_pid_list *stopped_pids;
214
/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACEFORK cannot be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD cannot be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;
224
9016a515
DJ
225/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
226 PTRACE_O_TRACEVFORKDONE. */
227
228static int linux_supports_tracevforkdone_flag = -1;
229
a96d9b2e
SDJ
230/* Stores the current used ptrace() options. */
231static int current_ptrace_options = 0;
232
3dd5b83d
PA
233/* Async mode support. */
234
b84876c2
PA
235/* The read/write ends of the pipe registered as waitable file in the
236 event loop. */
237static int linux_nat_event_pipe[2] = { -1, -1 };
238
7feb7d06 239/* Flush the event pipe. */
b84876c2 240
7feb7d06
PA
241static void
242async_file_flush (void)
b84876c2 243{
7feb7d06
PA
244 int ret;
245 char buf;
b84876c2 246
7feb7d06 247 do
b84876c2 248 {
7feb7d06 249 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 250 }
7feb7d06 251 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
252}
253
7feb7d06
PA
254/* Put something (anything, doesn't matter what, or how much) in event
255 pipe, so that the select/poll in the event-loop realizes we have
256 something to process. */
252fbfc8 257
b84876c2 258static void
7feb7d06 259async_file_mark (void)
b84876c2 260{
7feb7d06 261 int ret;
b84876c2 262
7feb7d06
PA
263 /* It doesn't really matter what the pipe contains, as long we end
264 up with something in it. Might as well flush the previous
265 left-overs. */
266 async_file_flush ();
b84876c2 267
7feb7d06 268 do
b84876c2 269 {
7feb7d06 270 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 271 }
7feb7d06 272 while (ret == -1 && errno == EINTR);
b84876c2 273
7feb7d06
PA
274 /* Ignore EAGAIN. If the pipe is full, the event loop will already
275 be awakened anyway. */
b84876c2
PA
276}
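
/* For illustration only: the SIGCHLD half of the self-pipe trick
   described in the comment at the top of this file, reduced to a
   sketch.  The handler name is hypothetical; it is not the handler
   this file actually installs.  */
#if 0
static void
sigchld_handler_sketch (int signo)
{
  /* Marking the pipe is all the handler needs to do (read/write are
     async-signal-safe); the event loop's select/poll on
     linux_nat_event_pipe[0] then wakes up, and the real event
     processing happens outside signal context.  */
  if (signo == SIGCHLD)
    async_file_mark ();
}
#endif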
277
7feb7d06 278static void linux_nat_async (void (*callback)
3e43a32a
MS
279 (enum inferior_event_type event_type,
280 void *context),
7feb7d06 281 void *context);
7feb7d06
PA
282static int kill_lwp (int lwpid, int signo);
283
284static int stop_callback (struct lwp_info *lp, void *data);
285
286static void block_child_signals (sigset_t *prev_mask);
287static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
288
289struct lwp_info;
290static struct lwp_info *add_lwp (ptid_t ptid);
291static void purge_lwp_list (int pid);
4403d8e9 292static void delete_lwp (ptid_t ptid);
2277426b
PA
293static struct lwp_info *find_lwp_pid (ptid_t ptid);
294
ae087d01
DJ
295\f
296/* Trivial list manipulation functions to keep track of a list of
297 new stopped processes. */
298static void
3d799a95 299add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
300{
301 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 302
ae087d01 303 new_pid->pid = pid;
3d799a95 304 new_pid->status = status;
ae087d01
DJ
305 new_pid->next = *listp;
306 *listp = new_pid;
307}
308
84636d28
PA
309static int
310in_pid_list_p (struct simple_pid_list *list, int pid)
311{
312 struct simple_pid_list *p;
313
314 for (p = list; p != NULL; p = p->next)
315 if (p->pid == pid)
316 return 1;
317 return 0;
318}
319
ae087d01 320static int
46a96992 321pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
322{
323 struct simple_pid_list **p;
324
325 for (p = listp; *p != NULL; p = &(*p)->next)
326 if ((*p)->pid == pid)
327 {
328 struct simple_pid_list *next = (*p)->next;
e0881a8e 329
46a96992 330 *statusp = (*p)->status;
ae087d01
DJ
331 xfree (*p);
332 *p = next;
333 return 1;
334 }
335 return 0;
336}
337
3993f6b1
DJ
338\f
339/* A helper function for linux_test_for_tracefork, called after fork (). */
340
341static void
342linux_tracefork_child (void)
343{
3993f6b1
DJ
344 ptrace (PTRACE_TRACEME, 0, 0, 0);
345 kill (getpid (), SIGSTOP);
346 fork ();
48bb3cce 347 _exit (0);
3993f6b1
DJ
348}
349
7feb7d06 350/* Wrapper function for waitpid which handles EINTR. */
b957e937
DJ
351
352static int
46a96992 353my_waitpid (int pid, int *statusp, int flags)
b957e937
DJ
354{
355 int ret;
b84876c2 356
b957e937
DJ
357 do
358 {
46a96992 359 ret = waitpid (pid, statusp, flags);
b957e937
DJ
360 }
361 while (ret == -1 && errno == EINTR);
362
363 return ret;
364}
365
366/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
367
368 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
369 we know that the feature is not available. This may change the tracing
370 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
371
372 However, if it succeeds, we don't know for sure that the feature is
373 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 374 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
375 fork tracing, and let it fork. If the process exits, we assume that we
376 can't use TRACEFORK; if we get the fork notification, and we can extract
377 the new child's PID, then we assume that we can. */
3993f6b1
DJ
378
379static void
b957e937 380linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
381{
382 int child_pid, ret, status;
383 long second_pid;
7feb7d06 384 sigset_t prev_mask;
4c28f408 385
7feb7d06
PA
386 /* We don't want those ptrace calls to be interrupted. */
387 block_child_signals (&prev_mask);
3993f6b1 388
b957e937
DJ
389 linux_supports_tracefork_flag = 0;
390 linux_supports_tracevforkdone_flag = 0;
391
392 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
393 if (ret != 0)
7feb7d06
PA
394 {
395 restore_child_signals_mask (&prev_mask);
396 return;
397 }
b957e937 398
3993f6b1
DJ
399 child_pid = fork ();
400 if (child_pid == -1)
e2e0b3e5 401 perror_with_name (("fork"));
3993f6b1
DJ
402
403 if (child_pid == 0)
404 linux_tracefork_child ();
405
b957e937 406 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 407 if (ret == -1)
e2e0b3e5 408 perror_with_name (("waitpid"));
3993f6b1 409 else if (ret != child_pid)
8a3fe4f8 410 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 411 if (! WIFSTOPPED (status))
3e43a32a
MS
412 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
413 status);
3993f6b1 414
3993f6b1
DJ
415 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
416 if (ret != 0)
417 {
b957e937
DJ
418 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
419 if (ret != 0)
420 {
8a3fe4f8 421 warning (_("linux_test_for_tracefork: failed to kill child"));
7feb7d06 422 restore_child_signals_mask (&prev_mask);
b957e937
DJ
423 return;
424 }
425
426 ret = my_waitpid (child_pid, &status, 0);
427 if (ret != child_pid)
3e43a32a
MS
428 warning (_("linux_test_for_tracefork: failed "
429 "to wait for killed child"));
b957e937 430 else if (!WIFSIGNALED (status))
3e43a32a
MS
431 warning (_("linux_test_for_tracefork: unexpected "
432 "wait status 0x%x from killed child"), status);
b957e937 433
7feb7d06 434 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
435 return;
436 }
437
9016a515
DJ
438 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
439 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
440 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
441 linux_supports_tracevforkdone_flag = (ret == 0);
442
b957e937
DJ
443 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
444 if (ret != 0)
8a3fe4f8 445 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
446
447 ret = my_waitpid (child_pid, &status, 0);
448
3993f6b1
DJ
449 if (ret == child_pid && WIFSTOPPED (status)
450 && status >> 16 == PTRACE_EVENT_FORK)
451 {
452 second_pid = 0;
453 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
454 if (ret == 0 && second_pid != 0)
455 {
456 int second_status;
457
458 linux_supports_tracefork_flag = 1;
b957e937
DJ
459 my_waitpid (second_pid, &second_status, 0);
460 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
461 if (ret != 0)
3e43a32a
MS
462 warning (_("linux_test_for_tracefork: "
463 "failed to kill second child"));
97725dc4 464 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
465 }
466 }
b957e937 467 else
8a3fe4f8
AC
468 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
469 "(%d, status 0x%x)"), ret, status);
3993f6b1 470
b957e937
DJ
471 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
472 if (ret != 0)
8a3fe4f8 473 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 474 my_waitpid (child_pid, &status, 0);
4c28f408 475
7feb7d06 476 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
477}
478
a96d9b2e
SDJ
479/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
480
481 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
482 we know that the feature is not available. This may change the tracing
483 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
484
485static void
486linux_test_for_tracesysgood (int original_pid)
487{
488 int ret;
489 sigset_t prev_mask;
490
491 /* We don't want those ptrace calls to be interrupted. */
492 block_child_signals (&prev_mask);
493
494 linux_supports_tracesysgood_flag = 0;
495
496 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
497 if (ret != 0)
498 goto out;
499
500 linux_supports_tracesysgood_flag = 1;
501out:
502 restore_child_signals_mask (&prev_mask);
503}
504
/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */
507
508static int
509linux_supports_tracesysgood (int pid)
510{
511 if (linux_supports_tracesysgood_flag == -1)
512 linux_test_for_tracesysgood (pid);
513 return linux_supports_tracesysgood_flag;
514}
515
3993f6b1
DJ
516/* Return non-zero iff we have tracefork functionality available.
517 This function also sets linux_supports_tracefork_flag. */
518
519static int
b957e937 520linux_supports_tracefork (int pid)
3993f6b1
DJ
521{
522 if (linux_supports_tracefork_flag == -1)
b957e937 523 linux_test_for_tracefork (pid);
3993f6b1
DJ
524 return linux_supports_tracefork_flag;
525}
526
9016a515 527static int
b957e937 528linux_supports_tracevforkdone (int pid)
9016a515
DJ
529{
530 if (linux_supports_tracefork_flag == -1)
b957e937 531 linux_test_for_tracefork (pid);
9016a515
DJ
532 return linux_supports_tracevforkdone_flag;
533}
534
a96d9b2e
SDJ
535static void
536linux_enable_tracesysgood (ptid_t ptid)
537{
538 int pid = ptid_get_lwp (ptid);
539
540 if (pid == 0)
541 pid = ptid_get_pid (ptid);
542
543 if (linux_supports_tracesysgood (pid) == 0)
544 return;
545
546 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
547
548 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
549}
550
3993f6b1 551\f
4de4c07c
DJ
552void
553linux_enable_event_reporting (ptid_t ptid)
554{
d3587048 555 int pid = ptid_get_lwp (ptid);
4de4c07c 556
d3587048
DJ
557 if (pid == 0)
558 pid = ptid_get_pid (ptid);
559
b957e937 560 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
561 return;
562
a96d9b2e
SDJ
563 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
564 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
565
b957e937 566 if (linux_supports_tracevforkdone (pid))
a96d9b2e 567 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
9016a515
DJ
568
569 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
570 read-only process state. */
4de4c07c 571
a96d9b2e 572 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
4de4c07c
DJ
573}
574
6d8fd2b7
UW
575static void
576linux_child_post_attach (int pid)
4de4c07c
DJ
577{
578 linux_enable_event_reporting (pid_to_ptid (pid));
a96d9b2e 579 linux_enable_tracesysgood (pid_to_ptid (pid));
4de4c07c
DJ
580}
581
10d6c8cd 582static void
4de4c07c
DJ
583linux_child_post_startup_inferior (ptid_t ptid)
584{
585 linux_enable_event_reporting (ptid);
a96d9b2e 586 linux_enable_tracesysgood (ptid);
4de4c07c
DJ
587}
588
4403d8e9
JK
589/* Return the number of known LWPs in the tgid given by PID. */
590
591static int
592num_lwps (int pid)
593{
594 int count = 0;
595 struct lwp_info *lp;
596
597 for (lp = lwp_list; lp; lp = lp->next)
598 if (ptid_get_pid (lp->ptid) == pid)
599 count++;
600
601 return count;
602}
603
604/* Call delete_lwp with prototype compatible for make_cleanup. */
605
606static void
607delete_lwp_cleanup (void *lp_voidp)
608{
609 struct lwp_info *lp = lp_voidp;
610
611 delete_lwp (lp->ptid);
612}
613
6d8fd2b7
UW
614static int
615linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 616{
7feb7d06 617 sigset_t prev_mask;
9016a515 618 int has_vforked;
4de4c07c
DJ
619 int parent_pid, child_pid;
620
7feb7d06 621 block_child_signals (&prev_mask);
b84876c2 622
e58b0e63
PA
623 has_vforked = (inferior_thread ()->pending_follow.kind
624 == TARGET_WAITKIND_VFORKED);
625 parent_pid = ptid_get_lwp (inferior_ptid);
d3587048 626 if (parent_pid == 0)
e58b0e63
PA
627 parent_pid = ptid_get_pid (inferior_ptid);
628 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
4de4c07c 629
2277426b
PA
630 if (!detach_fork)
631 linux_enable_event_reporting (pid_to_ptid (child_pid));
632
6c95b8df
PA
633 if (has_vforked
634 && !non_stop /* Non-stop always resumes both branches. */
635 && (!target_is_async_p () || sync_execution)
636 && !(follow_child || detach_fork || sched_multi))
637 {
638 /* The parent stays blocked inside the vfork syscall until the
639 child execs or exits. If we don't let the child run, then
640 the parent stays blocked. If we're telling the parent to run
641 in the foreground, the user will not be able to ctrl-c to get
642 back the terminal, effectively hanging the debug session. */
ac74f770
MS
643 fprintf_filtered (gdb_stderr, _("\
644Can not resume the parent process over vfork in the foreground while\n\
645holding the child stopped. Try \"set detach-on-fork\" or \
646\"set schedule-multiple\".\n"));
647 /* FIXME output string > 80 columns. */
6c95b8df
PA
648 return 1;
649 }
650
4de4c07c
DJ
651 if (! follow_child)
652 {
6c95b8df 653 struct lwp_info *child_lp = NULL;
4de4c07c 654
1777feb0 655 /* We're already attached to the parent, by default. */
4de4c07c 656
ac264b3b
MS
657 /* Detach new forked process? */
658 if (detach_fork)
f75c00e4 659 {
4403d8e9
JK
660 struct cleanup *old_chain;
661
6c95b8df
PA
662 /* Before detaching from the child, remove all breakpoints
663 from it. If we forked, then this has already been taken
664 care of by infrun.c. If we vforked however, any
665 breakpoint inserted in the parent is visible in the
666 child, even those added while stopped in a vfork
667 catchpoint. This will remove the breakpoints from the
668 parent also, but they'll be reinserted below. */
669 if (has_vforked)
670 {
671 /* keep breakpoints list in sync. */
672 remove_breakpoints_pid (GET_PID (inferior_ptid));
673 }
674
e85a822c 675 if (info_verbose || debug_linux_nat)
ac264b3b
MS
676 {
677 target_terminal_ours ();
678 fprintf_filtered (gdb_stdlog,
3e43a32a
MS
679 "Detaching after fork from "
680 "child process %d.\n",
ac264b3b
MS
681 child_pid);
682 }
4de4c07c 683
4403d8e9
JK
684 old_chain = save_inferior_ptid ();
685 inferior_ptid = ptid_build (child_pid, child_pid, 0);
686
687 child_lp = add_lwp (inferior_ptid);
688 child_lp->stopped = 1;
689 child_lp->last_resume_kind = resume_stop;
690 make_cleanup (delete_lwp_cleanup, child_lp);
691
          /* CHILD_LP has a new PID; therefore linux_nat_new_thread is
             not called for it.  See i386_inferior_data_get for the
             Linux kernel specifics.  Ensure linux_nat_prepare_to_resume
             will reset the hardware debug registers.  This is done by
             the linux_nat_new_thread call, which is being skipped in
             add_lwp above for the first lwp of a pid.  */
697 gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
698 if (linux_nat_new_thread != NULL)
699 linux_nat_new_thread (child_lp);
700
701 if (linux_nat_prepare_to_resume != NULL)
702 linux_nat_prepare_to_resume (child_lp);
ac264b3b 703 ptrace (PTRACE_DETACH, child_pid, 0, 0);
4403d8e9
JK
704
705 do_cleanups (old_chain);
ac264b3b
MS
706 }
707 else
708 {
77435e4c 709 struct inferior *parent_inf, *child_inf;
2277426b 710 struct cleanup *old_chain;
7f9f62ba
PA
711
712 /* Add process to GDB's tables. */
77435e4c
PA
713 child_inf = add_inferior (child_pid);
714
e58b0e63 715 parent_inf = current_inferior ();
77435e4c 716 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 717 copy_terminal_info (child_inf, parent_inf);
7f9f62ba 718
2277426b 719 old_chain = save_inferior_ptid ();
6c95b8df 720 save_current_program_space ();
2277426b
PA
721
722 inferior_ptid = ptid_build (child_pid, child_pid, 0);
723 add_thread (inferior_ptid);
6c95b8df
PA
724 child_lp = add_lwp (inferior_ptid);
725 child_lp->stopped = 1;
25289eb2 726 child_lp->last_resume_kind = resume_stop;
7dcd53a0 727 child_inf->symfile_flags = SYMFILE_NO_READ;
2277426b 728
6c95b8df
PA
729 /* If this is a vfork child, then the address-space is
730 shared with the parent. */
731 if (has_vforked)
732 {
733 child_inf->pspace = parent_inf->pspace;
734 child_inf->aspace = parent_inf->aspace;
735
736 /* The parent will be frozen until the child is done
737 with the shared region. Keep track of the
738 parent. */
739 child_inf->vfork_parent = parent_inf;
740 child_inf->pending_detach = 0;
741 parent_inf->vfork_child = child_inf;
742 parent_inf->pending_detach = 0;
743 }
744 else
745 {
746 child_inf->aspace = new_address_space ();
747 child_inf->pspace = add_program_space (child_inf->aspace);
748 child_inf->removable = 1;
749 set_current_program_space (child_inf->pspace);
750 clone_program_space (child_inf->pspace, parent_inf->pspace);
751
752 /* Let the shared library layer (solib-svr4) learn about
753 this new process, relocate the cloned exec, pull in
754 shared libraries, and install the solib event
755 breakpoint. If a "cloned-VM" event was propagated
756 better throughout the core, this wouldn't be
757 required. */
268a4a75 758 solib_create_inferior_hook (0);
6c95b8df
PA
759 }
760
761 /* Let the thread_db layer learn about this new process. */
2277426b
PA
762 check_for_thread_db ();
763
764 do_cleanups (old_chain);
ac264b3b 765 }
9016a515
DJ
766
767 if (has_vforked)
768 {
3ced3da4 769 struct lwp_info *parent_lp;
6c95b8df
PA
770 struct inferior *parent_inf;
771
772 parent_inf = current_inferior ();
773
          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
781 parent_inf->waiting_for_vfork_done = detach_fork;
56710373 782 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
6c95b8df 783
3ced3da4 784 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
b957e937 785 gdb_assert (linux_supports_tracefork_flag >= 0);
3ced3da4 786
b957e937 787 if (linux_supports_tracevforkdone (0))
9016a515 788 {
6c95b8df
PA
789 if (debug_linux_nat)
790 fprintf_unfiltered (gdb_stdlog,
791 "LCFF: waiting for VFORK_DONE on %d\n",
792 parent_pid);
3ced3da4 793 parent_lp->stopped = 1;
9016a515 794
6c95b8df
PA
795 /* We'll handle the VFORK_DONE event like any other
796 event, in target_wait. */
9016a515
DJ
797 }
798 else
799 {
800 /* We can't insert breakpoints until the child has
801 finished with the shared memory region. We need to
802 wait until that happens. Ideal would be to just
803 call:
804 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
805 - waitpid (parent_pid, &status, __WALL);
806 However, most architectures can't handle a syscall
807 being traced on the way out if it wasn't traced on
808 the way in.
809
810 We might also think to loop, continuing the child
811 until it exits or gets a SIGTRAP. One problem is
812 that the child might call ptrace with PTRACE_TRACEME.
813
814 There's no simple and reliable way to figure out when
815 the vforked child will be done with its copy of the
816 shared memory. We could step it out of the syscall,
817 two instructions, let it go, and then single-step the
818 parent once. When we have hardware single-step, this
819 would work; with software single-step it could still
820 be made to work but we'd have to be able to insert
821 single-step breakpoints in the child, and we'd have
822 to insert -just- the single-step breakpoint in the
823 parent. Very awkward.
824
825 In the end, the best we can do is to make sure it
826 runs for a little while. Hopefully it will be out of
827 range of any breakpoints we reinsert. Usually this
828 is only the single-step breakpoint at vfork's return
829 point. */
830
6c95b8df
PA
831 if (debug_linux_nat)
832 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
833 "LCFF: no VFORK_DONE "
834 "support, sleeping a bit\n");
6c95b8df 835
9016a515 836 usleep (10000);
9016a515 837
6c95b8df
PA
838 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
839 and leave it pending. The next linux_nat_resume call
840 will notice a pending event, and bypasses actually
841 resuming the inferior. */
3ced3da4
PA
842 parent_lp->status = 0;
843 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
844 parent_lp->stopped = 1;
6c95b8df
PA
845
846 /* If we're in async mode, need to tell the event loop
847 there's something here to process. */
848 if (target_can_async_p ())
849 async_file_mark ();
850 }
9016a515 851 }
4de4c07c 852 }
3993f6b1 853 else
4de4c07c 854 {
77435e4c 855 struct inferior *parent_inf, *child_inf;
3ced3da4 856 struct lwp_info *child_lp;
6c95b8df 857 struct program_space *parent_pspace;
4de4c07c 858
e85a822c 859 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
860 {
861 target_terminal_ours ();
6c95b8df 862 if (has_vforked)
3e43a32a
MS
863 fprintf_filtered (gdb_stdlog,
864 _("Attaching after process %d "
865 "vfork to child process %d.\n"),
6c95b8df
PA
866 parent_pid, child_pid);
867 else
3e43a32a
MS
868 fprintf_filtered (gdb_stdlog,
869 _("Attaching after process %d "
870 "fork to child process %d.\n"),
6c95b8df 871 parent_pid, child_pid);
f75c00e4 872 }
4de4c07c 873
7a7d3353
PA
874 /* Add the new inferior first, so that the target_detach below
875 doesn't unpush the target. */
876
77435e4c
PA
877 child_inf = add_inferior (child_pid);
878
e58b0e63 879 parent_inf = current_inferior ();
77435e4c 880 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 881 copy_terminal_info (child_inf, parent_inf);
7a7d3353 882
6c95b8df 883 parent_pspace = parent_inf->pspace;
9016a515 884
6c95b8df
PA
      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent; otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */
9016a515
DJ
894
895 if (has_vforked)
7f9f62ba 896 {
6c95b8df
PA
897 gdb_assert (child_inf->vfork_parent == NULL);
898 gdb_assert (parent_inf->vfork_child == NULL);
899 child_inf->vfork_parent = parent_inf;
900 child_inf->pending_detach = 0;
901 parent_inf->vfork_child = child_inf;
902 parent_inf->pending_detach = detach_fork;
903 parent_inf->waiting_for_vfork_done = 0;
ac264b3b 904 }
2277426b 905 else if (detach_fork)
b84876c2 906 target_detach (NULL, 0);
4de4c07c 907
6c95b8df
PA
908 /* Note that the detach above makes PARENT_INF dangling. */
909
910 /* Add the child thread to the appropriate lists, and switch to
911 this new thread, before cloning the program space, and
912 informing the solib layer about this new process. */
913
9f0bdab8 914 inferior_ptid = ptid_build (child_pid, child_pid, 0);
2277426b 915 add_thread (inferior_ptid);
3ced3da4
PA
916 child_lp = add_lwp (inferior_ptid);
917 child_lp->stopped = 1;
25289eb2 918 child_lp->last_resume_kind = resume_stop;
6c95b8df
PA
919
920 /* If this is a vfork child, then the address-space is shared
921 with the parent. If we detached from the parent, then we can
922 reuse the parent's program/address spaces. */
923 if (has_vforked || detach_fork)
924 {
925 child_inf->pspace = parent_pspace;
926 child_inf->aspace = child_inf->pspace->aspace;
927 }
928 else
929 {
930 child_inf->aspace = new_address_space ();
931 child_inf->pspace = add_program_space (child_inf->aspace);
932 child_inf->removable = 1;
7dcd53a0 933 child_inf->symfile_flags = SYMFILE_NO_READ;
6c95b8df
PA
934 set_current_program_space (child_inf->pspace);
935 clone_program_space (child_inf->pspace, parent_pspace);
936
937 /* Let the shared library layer (solib-svr4) learn about
938 this new process, relocate the cloned exec, pull in
939 shared libraries, and install the solib event breakpoint.
940 If a "cloned-VM" event was propagated better throughout
941 the core, this wouldn't be required. */
268a4a75 942 solib_create_inferior_hook (0);
6c95b8df 943 }
ac264b3b 944
6c95b8df 945 /* Let the thread_db layer learn about this new process. */
ef29ce1a 946 check_for_thread_db ();
4de4c07c
DJ
947 }
948
7feb7d06 949 restore_child_signals_mask (&prev_mask);
4de4c07c
DJ
950 return 0;
951}
952
4de4c07c 953\f
77b06cd7 954static int
6d8fd2b7 955linux_child_insert_fork_catchpoint (int pid)
4de4c07c 956{
77b06cd7 957 return !linux_supports_tracefork (pid);
3993f6b1
DJ
958}
959
eb73ad13
PA
960static int
961linux_child_remove_fork_catchpoint (int pid)
962{
963 return 0;
964}
965
77b06cd7 966static int
6d8fd2b7 967linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 968{
77b06cd7 969 return !linux_supports_tracefork (pid);
3993f6b1
DJ
970}
971
eb73ad13
PA
972static int
973linux_child_remove_vfork_catchpoint (int pid)
974{
975 return 0;
976}
977
77b06cd7 978static int
6d8fd2b7 979linux_child_insert_exec_catchpoint (int pid)
3993f6b1 980{
77b06cd7 981 return !linux_supports_tracefork (pid);
3993f6b1
DJ
982}
983
eb73ad13
PA
984static int
985linux_child_remove_exec_catchpoint (int pid)
986{
987 return 0;
988}
989
a96d9b2e
SDJ
990static int
991linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
992 int table_size, int *table)
993{
77b06cd7
TJB
994 if (!linux_supports_tracesysgood (pid))
995 return 1;
996
a96d9b2e
SDJ
997 /* On GNU/Linux, we ignore the arguments. It means that we only
998 enable the syscall catchpoints, but do not disable them.
77b06cd7 999
a96d9b2e
SDJ
1000 Also, we do not use the `table' information because we do not
1001 filter system calls here. We let GDB do the logic for us. */
1002 return 0;
1003}
1004
d6b0e80f
AC
/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited, the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
1037
1038/* List of known LWPs. */
9f0bdab8 1039struct lwp_info *lwp_list;
d6b0e80f
AC
1040\f
1041
d6b0e80f
AC
1042/* Original signal mask. */
1043static sigset_t normal_mask;
1044
1045/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1046 _initialize_linux_nat. */
1047static sigset_t suspend_mask;
1048
7feb7d06
PA
/* Signals to block to make sigsuspend work.  */
1050static sigset_t blocked_mask;
1051
1052/* SIGCHLD action. */
1053struct sigaction sigchld_action;
b84876c2 1054
7feb7d06
PA
1055/* Block child signals (SIGCHLD and linux threads signals), and store
1056 the previous mask in PREV_MASK. */
84e46146 1057
7feb7d06
PA
1058static void
1059block_child_signals (sigset_t *prev_mask)
1060{
1061 /* Make sure SIGCHLD is blocked. */
1062 if (!sigismember (&blocked_mask, SIGCHLD))
1063 sigaddset (&blocked_mask, SIGCHLD);
1064
1065 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1066}
1067
1068/* Restore child signals mask, previously returned by
1069 block_child_signals. */
1070
1071static void
1072restore_child_signals_mask (sigset_t *prev_mask)
1073{
1074 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1075}
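
/* The typical usage pattern for the pair above, as seen throughout
   this file (sketch only):

     sigset_t prev_mask;

     block_child_signals (&prev_mask);
     ... ptrace/waitpid work that must not race with SIGCHLD ...
     restore_child_signals_mask (&prev_mask);
*/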
2455069d
UW
1076
1077/* Mask of signals to pass directly to the inferior. */
1078static sigset_t pass_mask;
1079
1080/* Update signals to pass to the inferior. */
1081static void
1082linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1083{
1084 int signo;
1085
1086 sigemptyset (&pass_mask);
1087
1088 for (signo = 1; signo < NSIG; signo++)
1089 {
1090 int target_signo = target_signal_from_host (signo);
1091 if (target_signo < numsigs && pass_signals[target_signo])
1092 sigaddset (&pass_mask, signo);
1093 }
1094}
1095
d6b0e80f
AC
1096\f
1097
1098/* Prototypes for local functions. */
1099static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 1100static int linux_thread_alive (ptid_t ptid);
6d8fd2b7 1101static char *linux_child_pid_to_exec_file (int pid);
710151dd 1102
d6b0e80f
AC
1103\f
1104/* Convert wait status STATUS to a string. Used for printing debug
1105 messages only. */
1106
1107static char *
1108status_to_str (int status)
1109{
1110 static char buf[64];
1111
1112 if (WIFSTOPPED (status))
206aa767 1113 {
ca2163eb 1114 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
206aa767
DE
1115 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1116 strsignal (SIGTRAP));
1117 else
1118 snprintf (buf, sizeof (buf), "%s (stopped)",
1119 strsignal (WSTOPSIG (status)));
1120 }
d6b0e80f
AC
1121 else if (WIFSIGNALED (status))
1122 snprintf (buf, sizeof (buf), "%s (terminated)",
ba9b2ec3 1123 strsignal (WTERMSIG (status)));
d6b0e80f
AC
1124 else
1125 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1126
1127 return buf;
1128}
1129
7b50312a
PA
1130/* Destroy and free LP. */
1131
1132static void
1133lwp_free (struct lwp_info *lp)
1134{
1135 xfree (lp->arch_private);
1136 xfree (lp);
1137}
1138
d90e17a7
PA
/* Remove all LWPs belonging to PID from the lwp list.  */
1140
1141static void
1142purge_lwp_list (int pid)
1143{
1144 struct lwp_info *lp, *lpprev, *lpnext;
1145
1146 lpprev = NULL;
1147
1148 for (lp = lwp_list; lp; lp = lpnext)
1149 {
1150 lpnext = lp->next;
1151
1152 if (ptid_get_pid (lp->ptid) == pid)
1153 {
1154 if (lp == lwp_list)
1155 lwp_list = lp->next;
1156 else
1157 lpprev->next = lp->next;
1158
7b50312a 1159 lwp_free (lp);
d90e17a7
PA
1160 }
1161 else
1162 lpprev = lp;
1163 }
1164}
1165
f973ed9c 1166/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
1167 structure describing the new LWP. The LWP should already be stopped
1168 (with an exception for the very first LWP). */
d6b0e80f
AC
1169
1170static struct lwp_info *
1171add_lwp (ptid_t ptid)
1172{
1173 struct lwp_info *lp;
1174
1175 gdb_assert (is_lwp (ptid));
1176
1177 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1178
1179 memset (lp, 0, sizeof (struct lwp_info));
1180
25289eb2 1181 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1182 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1183
1184 lp->ptid = ptid;
dc146f7c 1185 lp->core = -1;
d6b0e80f
AC
1186
1187 lp->next = lwp_list;
1188 lwp_list = lp;
d6b0e80f 1189
6e012a6c
PA
1190 /* Let the arch specific bits know about this new thread. Current
1191 clients of this callback take the opportunity to install
1192 watchpoints in the new thread. Don't do this for the first
1193 thread though. If we're spawning a child ("run"), the thread
1194 executes the shell wrapper first, and we shouldn't touch it until
1195 it execs the program we want to debug. For "attach", it'd be
1196 okay to call the callback, but it's not necessary, because
1197 watchpoints can't yet have been inserted into the inferior. */
1198 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
7b50312a 1199 linux_nat_new_thread (lp);
9f0bdab8 1200
d6b0e80f
AC
1201 return lp;
1202}
1203
1204/* Remove the LWP specified by PID from the list. */
1205
1206static void
1207delete_lwp (ptid_t ptid)
1208{
1209 struct lwp_info *lp, *lpprev;
1210
1211 lpprev = NULL;
1212
1213 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1214 if (ptid_equal (lp->ptid, ptid))
1215 break;
1216
1217 if (!lp)
1218 return;
1219
d6b0e80f
AC
1220 if (lpprev)
1221 lpprev->next = lp->next;
1222 else
1223 lwp_list = lp->next;
1224
7b50312a 1225 lwp_free (lp);
d6b0e80f
AC
1226}
1227
1228/* Return a pointer to the structure describing the LWP corresponding
1229 to PID. If no corresponding LWP could be found, return NULL. */
1230
1231static struct lwp_info *
1232find_lwp_pid (ptid_t ptid)
1233{
1234 struct lwp_info *lp;
1235 int lwp;
1236
1237 if (is_lwp (ptid))
1238 lwp = GET_LWP (ptid);
1239 else
1240 lwp = GET_PID (ptid);
1241
1242 for (lp = lwp_list; lp; lp = lp->next)
1243 if (lwp == GET_LWP (lp->ptid))
1244 return lp;
1245
1246 return NULL;
1247}
1248
1249/* Call CALLBACK with its second argument set to DATA for every LWP in
1250 the list. If CALLBACK returns 1 for a particular LWP, return a
1251 pointer to the structure describing that LWP immediately.
1252 Otherwise return NULL. */
1253
1254struct lwp_info *
d90e17a7
PA
1255iterate_over_lwps (ptid_t filter,
1256 int (*callback) (struct lwp_info *, void *),
1257 void *data)
d6b0e80f
AC
1258{
1259 struct lwp_info *lp, *lpnext;
1260
1261 for (lp = lwp_list; lp; lp = lpnext)
1262 {
1263 lpnext = lp->next;
d90e17a7
PA
1264
1265 if (ptid_match (lp->ptid, filter))
1266 {
1267 if ((*callback) (lp, data))
1268 return lp;
1269 }
d6b0e80f
AC
1270 }
1271
1272 return NULL;
1273}
1274
4403d8e9
JK
1275/* Iterate like iterate_over_lwps does except when forking-off a child call
1276 CALLBACK with CALLBACK_DATA specifically only for that new child PID. */
1277
1278void
1279linux_nat_iterate_watchpoint_lwps
1280 (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
1281{
1282 int inferior_pid = ptid_get_pid (inferior_ptid);
1283 struct inferior *inf = current_inferior ();
1284
1285 if (inf->pid == inferior_pid)
1286 {
1287 /* Iterate all the threads of the current inferior. Without specifying
1288 INFERIOR_PID it would iterate all threads of all inferiors, which is
1289 inappropriate for watchpoints. */
1290
1291 iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
1292 }
1293 else
1294 {
1295 /* Detaching a new child PID temporarily present in INFERIOR_PID. */
1296
1297 struct lwp_info *child_lp;
1298 struct cleanup *old_chain;
1299 pid_t child_pid = GET_PID (inferior_ptid);
1300 ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);
1301
1302 gdb_assert (!is_lwp (inferior_ptid));
1303 gdb_assert (find_lwp_pid (child_ptid) == NULL);
1304 child_lp = add_lwp (child_ptid);
1305 child_lp->stopped = 1;
1306 child_lp->last_resume_kind = resume_stop;
1307 old_chain = make_cleanup (delete_lwp_cleanup, child_lp);
1308
1309 callback (child_lp, callback_data);
1310
1311 do_cleanups (old_chain);
1312 }
1313}
1314
2277426b
PA
1315/* Update our internal state when changing from one checkpoint to
1316 another indicated by NEW_PTID. We can only switch single-threaded
1317 applications, so we only create one new LWP, and the previous list
1318 is discarded. */
f973ed9c
DJ
1319
1320void
1321linux_nat_switch_fork (ptid_t new_ptid)
1322{
1323 struct lwp_info *lp;
1324
2277426b
PA
1325 purge_lwp_list (GET_PID (inferior_ptid));
1326
f973ed9c
DJ
1327 lp = add_lwp (new_ptid);
1328 lp->stopped = 1;
e26af52f 1329
2277426b
PA
1330 /* This changes the thread's ptid while preserving the gdb thread
1331 num. Also changes the inferior pid, while preserving the
1332 inferior num. */
1333 thread_change_ptid (inferior_ptid, new_ptid);
1334
1335 /* We've just told GDB core that the thread changed target id, but,
1336 in fact, it really is a different thread, with different register
1337 contents. */
1338 registers_changed ();
e26af52f
DJ
1339}
1340
e26af52f
DJ
1341/* Handle the exit of a single thread LP. */
1342
1343static void
1344exit_lwp (struct lwp_info *lp)
1345{
e09875d4 1346 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1347
1348 if (th)
e26af52f 1349 {
17faa917
DJ
1350 if (print_thread_events)
1351 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1352
4f8d22e3 1353 delete_thread (lp->ptid);
e26af52f
DJ
1354 }
1355
1356 delete_lwp (lp->ptid);
1357}
1358
a0ef4274
DJ
1359/* Wait for the LWP specified by LP, which we have just attached to.
1360 Returns a wait status for that LWP, to cache. */
1361
1362static int
1363linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1364 int *signalled)
1365{
1366 pid_t new_pid, pid = GET_LWP (ptid);
1367 int status;
1368
644cebc9 1369 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
1370 {
1371 if (debug_linux_nat)
1372 fprintf_unfiltered (gdb_stdlog,
1373 "LNPAW: Attaching to a stopped process\n");
1374
1375 /* The process is definitely stopped. It is in a job control
1376 stop, unless the kernel predates the TASK_STOPPED /
1377 TASK_TRACED distinction, in which case it might be in a
1378 ptrace stop. Make sure it is in a ptrace stop; from there we
1379 can kill it, signal it, et cetera.
1380
1381 First make sure there is a pending SIGSTOP. Since we are
1382 already attached, the process can not transition from stopped
1383 to running without a PTRACE_CONT; so we know this signal will
1384 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1385 probably already in the queue (unless this kernel is old
1386 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1387 is not an RT signal, it can only be queued once. */
1388 kill_lwp (pid, SIGSTOP);
1389
1390 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1391 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1392 ptrace (PTRACE_CONT, pid, 0, 0);
1393 }
1394
1395 /* Make sure the initial process is stopped. The user-level threads
1396 layer might want to poke around in the inferior, and that won't
1397 work if things haven't stabilized yet. */
1398 new_pid = my_waitpid (pid, &status, 0);
1399 if (new_pid == -1 && errno == ECHILD)
1400 {
1401 if (first)
1402 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1403
1404 /* Try again with __WCLONE to check cloned processes. */
1405 new_pid = my_waitpid (pid, &status, __WCLONE);
1406 *cloned = 1;
1407 }
1408
dacc9cb2
PP
1409 gdb_assert (pid == new_pid);
1410
1411 if (!WIFSTOPPED (status))
1412 {
1413 /* The pid we tried to attach has apparently just exited. */
1414 if (debug_linux_nat)
1415 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1416 pid, status_to_str (status));
1417 return status;
1418 }
a0ef4274
DJ
1419
1420 if (WSTOPSIG (status) != SIGSTOP)
1421 {
1422 *signalled = 1;
1423 if (debug_linux_nat)
1424 fprintf_unfiltered (gdb_stdlog,
1425 "LNPAW: Received %s after attaching\n",
1426 status_to_str (status));
1427 }
1428
1429 return status;
1430}
1431
84636d28
PA
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already
   auto-attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */
d6b0e80f 1437
9ee57c33 1438int
93815fbf 1439lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1440{
9ee57c33 1441 struct lwp_info *lp;
7feb7d06 1442 sigset_t prev_mask;
84636d28 1443 int lwpid;
d6b0e80f
AC
1444
1445 gdb_assert (is_lwp (ptid));
1446
7feb7d06 1447 block_child_signals (&prev_mask);
d6b0e80f 1448
9ee57c33 1449 lp = find_lwp_pid (ptid);
84636d28 1450 lwpid = GET_LWP (ptid);
d6b0e80f
AC
1451
1452 /* We assume that we're already attached to any LWP that has an id
1453 equal to the overall process id, and to any LWP that is already
1454 in our list of LWPs. If we're not seeing exit events from threads
1455 and we've had PID wraparound since we last tried to stop all threads,
1456 this assumption might be wrong; fortunately, this is very unlikely
1457 to happen. */
84636d28 1458 if (lwpid != GET_PID (ptid) && lp == NULL)
d6b0e80f 1459 {
a0ef4274 1460 int status, cloned = 0, signalled = 0;
d6b0e80f 1461
84636d28 1462 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
9ee57c33 1463 {
84636d28
PA
1464 if (linux_supports_tracefork_flag)
1465 {
1466 /* If we haven't stopped all threads when we get here,
1467 we may have seen a thread listed in thread_db's list,
1468 but not processed the PTRACE_EVENT_CLONE yet. If
1469 that's the case, ignore this new thread, and let
1470 normal event handling discover it later. */
1471 if (in_pid_list_p (stopped_pids, lwpid))
1472 {
1473 /* We've already seen this thread stop, but we
1474 haven't seen the PTRACE_EVENT_CLONE extended
1475 event yet. */
1476 restore_child_signals_mask (&prev_mask);
1477 return 0;
1478 }
1479 else
1480 {
1481 int new_pid;
1482 int status;
1483
1484 /* See if we've got a stop for this new child
1485 pending. If so, we're already attached. */
1486 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1487 if (new_pid == -1 && errno == ECHILD)
1488 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1489 if (new_pid != -1)
1490 {
1491 if (WIFSTOPPED (status))
1492 add_to_pid_list (&stopped_pids, lwpid, status);
1493
1494 restore_child_signals_mask (&prev_mask);
1495 return 1;
1496 }
1497 }
1498 }
1499
9ee57c33
DJ
1500 /* If we fail to attach to the thread, issue a warning,
1501 but continue. One way this can happen is if thread
e9efe249 1502 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1503 bug may place threads in the thread list and then fail
1504 to create them. */
1505 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1506 safe_strerror (errno));
7feb7d06 1507 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1508 return -1;
1509 }
1510
d6b0e80f
AC
1511 if (debug_linux_nat)
1512 fprintf_unfiltered (gdb_stdlog,
1513 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1514 target_pid_to_str (ptid));
1515
a0ef4274 1516 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1517 if (!WIFSTOPPED (status))
673c2bbe
DE
1518 {
1519 restore_child_signals_mask (&prev_mask);
f687d035 1520 return 1;
673c2bbe 1521 }
dacc9cb2 1522
a0ef4274
DJ
1523 lp = add_lwp (ptid);
1524 lp->stopped = 1;
1525 lp->cloned = cloned;
1526 lp->signalled = signalled;
1527 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1528 {
a0ef4274
DJ
1529 lp->resumed = 1;
1530 lp->status = status;
d6b0e80f
AC
1531 }
1532
a0ef4274 1533 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1534
1535 if (debug_linux_nat)
1536 {
1537 fprintf_unfiltered (gdb_stdlog,
1538 "LLAL: waitpid %s received %s\n",
1539 target_pid_to_str (ptid),
1540 status_to_str (status));
1541 }
1542 }
1543 else
1544 {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/Linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
9ee57c33
DJ
1551 if (lp == NULL)
1552 lp = add_lwp (ptid);
d6b0e80f
AC
1553 lp->stopped = 1;
1554 }
9ee57c33 1555
25289eb2 1556 lp->last_resume_kind = resume_stop;
7feb7d06 1557 restore_child_signals_mask (&prev_mask);
9ee57c33 1558 return 0;
d6b0e80f
AC
1559}
1560
b84876c2 1561static void
136d6dae
VP
1562linux_nat_create_inferior (struct target_ops *ops,
1563 char *exec_file, char *allargs, char **env,
b84876c2
PA
1564 int from_tty)
1565{
10568435
JK
1566#ifdef HAVE_PERSONALITY
1567 int personality_orig = 0, personality_set = 0;
1568#endif /* HAVE_PERSONALITY */
b84876c2
PA
1569
1570 /* The fork_child mechanism is synchronous and calls target_wait, so
1571 we have to mask the async mode. */
1572
10568435
JK
1573#ifdef HAVE_PERSONALITY
1574 if (disable_randomization)
1575 {
1576 errno = 0;
1577 personality_orig = personality (0xffffffff);
1578 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1579 {
1580 personality_set = 1;
1581 personality (personality_orig | ADDR_NO_RANDOMIZE);
1582 }
1583 if (errno != 0 || (personality_set
1584 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1585 warning (_("Error disabling address space randomization: %s"),
1586 safe_strerror (errno));
1587 }
1588#endif /* HAVE_PERSONALITY */
1589
2455069d
UW
1590 /* Make sure we report all signals during startup. */
1591 linux_nat_pass_signals (0, NULL);
1592
136d6dae 1593 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1594
10568435
JK
1595#ifdef HAVE_PERSONALITY
1596 if (personality_set)
1597 {
1598 errno = 0;
1599 personality (personality_orig);
1600 if (errno != 0)
1601 warning (_("Error restoring address space randomization: %s"),
1602 safe_strerror (errno));
1603 }
1604#endif /* HAVE_PERSONALITY */
b84876c2
PA
1605}
1606
d6b0e80f 1607static void
136d6dae 1608linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1609{
1610 struct lwp_info *lp;
d6b0e80f 1611 int status;
af990527 1612 ptid_t ptid;
d6b0e80f 1613
2455069d
UW
1614 /* Make sure we report all signals during attach. */
1615 linux_nat_pass_signals (0, NULL);
1616
136d6dae 1617 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1618
af990527
PA
1619 /* The ptrace base target adds the main thread with (pid,0,0)
1620 format. Decorate it with lwp info. */
1621 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1622 thread_change_ptid (inferior_ptid, ptid);
1623
9f0bdab8 1624 /* Add the initial process as the first LWP to the list. */
af990527 1625 lp = add_lwp (ptid);
a0ef4274
DJ
1626
1627 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1628 &lp->signalled);
dacc9cb2
PP
1629 if (!WIFSTOPPED (status))
1630 {
1631 if (WIFEXITED (status))
1632 {
1633 int exit_code = WEXITSTATUS (status);
1634
1635 target_terminal_ours ();
1636 target_mourn_inferior ();
1637 if (exit_code == 0)
1638 error (_("Unable to attach: program exited normally."));
1639 else
1640 error (_("Unable to attach: program exited with code %d."),
1641 exit_code);
1642 }
1643 else if (WIFSIGNALED (status))
1644 {
1645 enum target_signal signo;
1646
1647 target_terminal_ours ();
1648 target_mourn_inferior ();
1649
1650 signo = target_signal_from_host (WTERMSIG (status));
1651 error (_("Unable to attach: program terminated with signal "
1652 "%s, %s."),
1653 target_signal_to_name (signo),
1654 target_signal_to_string (signo));
1655 }
1656
1657 internal_error (__FILE__, __LINE__,
1658 _("unexpected status %d for PID %ld"),
1659 status, (long) GET_LWP (ptid));
1660 }
1661
a0ef4274 1662 lp->stopped = 1;
9f0bdab8 1663
a0ef4274 1664 /* Save the wait status to report later. */
d6b0e80f 1665 lp->resumed = 1;
a0ef4274
DJ
1666 if (debug_linux_nat)
1667 fprintf_unfiltered (gdb_stdlog,
1668 "LNA: waitpid %ld, saving status %s\n",
1669 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1670
7feb7d06
PA
1671 lp->status = status;
1672
1673 if (target_can_async_p ())
1674 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1675}
1676
a0ef4274
DJ
1677/* Get pending status of LP. */
1678static int
1679get_pending_status (struct lwp_info *lp, int *status)
1680{
ca2163eb
PA
1681 enum target_signal signo = TARGET_SIGNAL_0;
1682
1683 /* If we paused threads momentarily, we may have stored pending
1684 events in lp->status or lp->waitstatus (see stop_wait_callback),
1685 and GDB core hasn't seen any signal for those threads.
1686 Otherwise, the last signal reported to the core is found in the
1687 thread object's stop_signal.
1688
1689 There's a corner case that isn't handled here at present. Only
1690 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1691 stop_signal make sense as a real signal to pass to the inferior.
1692 Some catchpoint related events, like
1693 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1694 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1695 those traps are debug API (ptrace in our case) related and
1696 induced; the inferior wouldn't see them if it wasn't being
1697 traced. Hence, we should never pass them to the inferior, even
1698 when set to pass state. Since this corner case isn't handled by
1699 infrun.c when proceeding with a signal, for consistency, neither
1700 do we handle it here (or elsewhere in this file where we check for
1701 signal pass state). Normally SIGTRAP isn't set to pass state, so
1702 this is really a corner case. */
1703
1704 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1705 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1706 else if (lp->status)
1707 signo = target_signal_from_host (WSTOPSIG (lp->status));
1708 else if (non_stop && !is_executing (lp->ptid))
1709 {
1710 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1711
16c381f0 1712 signo = tp->suspend.stop_signal;
ca2163eb
PA
1713 }
1714 else if (!non_stop)
a0ef4274 1715 {
ca2163eb
PA
1716 struct target_waitstatus last;
1717 ptid_t last_ptid;
4c28f408 1718
ca2163eb 1719 get_last_target_status (&last_ptid, &last);
4c28f408 1720
ca2163eb
PA
1721 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1722 {
e09875d4 1723 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1724
16c381f0 1725 signo = tp->suspend.stop_signal;
4c28f408 1726 }
ca2163eb 1727 }
4c28f408 1728
ca2163eb 1729 *status = 0;
4c28f408 1730
ca2163eb
PA
1731 if (signo == TARGET_SIGNAL_0)
1732 {
1733 if (debug_linux_nat)
1734 fprintf_unfiltered (gdb_stdlog,
1735 "GPT: lwp %s has no pending signal\n",
1736 target_pid_to_str (lp->ptid));
1737 }
1738 else if (!signal_pass_state (signo))
1739 {
1740 if (debug_linux_nat)
3e43a32a
MS
1741 fprintf_unfiltered (gdb_stdlog,
1742 "GPT: lwp %s had signal %s, "
1743 "but it is in no pass state\n",
ca2163eb
PA
1744 target_pid_to_str (lp->ptid),
1745 target_signal_to_string (signo));
a0ef4274 1746 }
a0ef4274 1747 else
4c28f408 1748 {
ca2163eb
PA
1749 *status = W_STOPCODE (target_signal_to_host (signo));
1750
1751 if (debug_linux_nat)
1752 fprintf_unfiltered (gdb_stdlog,
1753 "GPT: lwp %s has pending signal %s\n",
1754 target_pid_to_str (lp->ptid),
1755 target_signal_to_string (signo));
4c28f408 1756 }
a0ef4274
DJ
1757
1758 return 0;
1759}
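/* Editor's sketch (not part of linux-nat.c): get_pending_status re-encodes
   a pending signal as a wait status with W_STOPCODE so callers can treat
   it like a value returned by waitpid.  The conventional definition is
   assumed below in case the C library does not provide the macro.  */

#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

static void
w_stopcode_roundtrip_example (void)
{
  int status = W_STOPCODE (SIGINT);

  /* The synthesized status decodes exactly like a real "stopped by
     SIGINT" report from waitpid.  */
  assert (WIFSTOPPED (status));
  assert (WSTOPSIG (status) == SIGINT);
}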
1760
d6b0e80f
AC
1761static int
1762detach_callback (struct lwp_info *lp, void *data)
1763{
1764 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1765
1766 if (debug_linux_nat && lp->status)
1767 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1768 strsignal (WSTOPSIG (lp->status)),
1769 target_pid_to_str (lp->ptid));
1770
a0ef4274
DJ
1771 /* If there is a pending SIGSTOP, get rid of it. */
1772 if (lp->signalled)
d6b0e80f 1773 {
d6b0e80f
AC
1774 if (debug_linux_nat)
1775 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1776 "DC: Sending SIGCONT to %s\n",
1777 target_pid_to_str (lp->ptid));
d6b0e80f 1778
a0ef4274 1779 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1780 lp->signalled = 0;
d6b0e80f
AC
1781 }
1782
1783 /* We don't actually detach from the LWP that has an id equal to the
1784 overall process id just yet. */
1785 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1786 {
a0ef4274
DJ
1787 int status = 0;
1788
1789 /* Pass on any pending signal for this LWP. */
1790 get_pending_status (lp, &status);
1791
7b50312a
PA
1792 if (linux_nat_prepare_to_resume != NULL)
1793 linux_nat_prepare_to_resume (lp);
d6b0e80f
AC
1794 errno = 0;
1795 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1796 WSTOPSIG (status)) < 0)
8a3fe4f8 1797 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1798 safe_strerror (errno));
1799
1800 if (debug_linux_nat)
1801 fprintf_unfiltered (gdb_stdlog,
1802 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1803 target_pid_to_str (lp->ptid),
7feb7d06 1804 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1805
1806 delete_lwp (lp->ptid);
1807 }
1808
1809 return 0;
1810}
1811
1812static void
136d6dae 1813linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1814{
b84876c2 1815 int pid;
a0ef4274 1816 int status;
d90e17a7
PA
1817 struct lwp_info *main_lwp;
1818
1819 pid = GET_PID (inferior_ptid);
a0ef4274 1820
b84876c2
PA
1821 if (target_can_async_p ())
1822 linux_nat_async (NULL, 0);
1823
4c28f408
PA
1824 /* Stop all threads before detaching. ptrace requires that the
1825 thread is stopped to successfully detach. */
d90e17a7 1826 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1827 /* ... and wait until all of them have reported back that
1828 they're no longer running. */
d90e17a7 1829 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1830
d90e17a7 1831 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1832
1833 /* Only the initial process should be left right now. */
d90e17a7
PA
1834 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1835
1836 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1837
a0ef4274
DJ
1838 /* Pass on any pending signal for the last LWP. */
1839 if ((args == NULL || *args == '\0')
d90e17a7 1840 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1841 && WIFSTOPPED (status))
1842 {
1843 /* Put the signal number in ARGS so that inf_ptrace_detach will
1844 pass it along with PTRACE_DETACH. */
1845 args = alloca (8);
1846 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1847 if (debug_linux_nat)
1848 fprintf_unfiltered (gdb_stdlog,
1849 "LND: Sending signal %s to %s\n",
1850 args,
1851 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1852 }
1853
7b50312a
PA
1854 if (linux_nat_prepare_to_resume != NULL)
1855 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1856 delete_lwp (main_lwp->ptid);
b84876c2 1857
7a7d3353
PA
1858 if (forks_exist_p ())
1859 {
1860 /* Multi-fork case. The current inferior_ptid is being detached
1861 from, but there are other viable forks to debug. Detach from
1862 the current fork, and context-switch to the first
1863 available. */
1864 linux_fork_detach (args, from_tty);
1865
1866 if (non_stop && target_can_async_p ())
1867 target_async (inferior_event_handler, 0);
1868 }
1869 else
1870 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1871}
1872
1873/* Resume LP. */
1874
25289eb2
PA
1875static void
1876resume_lwp (struct lwp_info *lp, int step)
d6b0e80f 1877{
25289eb2 1878 if (lp->stopped)
6c95b8df 1879 {
25289eb2
PA
1880 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1881
1882 if (inf->vfork_child != NULL)
1883 {
1884 if (debug_linux_nat)
1885 fprintf_unfiltered (gdb_stdlog,
1886 "RC: Not resuming %s (vfork parent)\n",
1887 target_pid_to_str (lp->ptid));
1888 }
1889 else if (lp->status == 0
1890 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1891 {
1892 if (debug_linux_nat)
1893 fprintf_unfiltered (gdb_stdlog,
1894 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1895 target_pid_to_str (lp->ptid));
1896
7b50312a
PA
1897 if (linux_nat_prepare_to_resume != NULL)
1898 linux_nat_prepare_to_resume (lp);
25289eb2
PA
1899 linux_ops->to_resume (linux_ops,
1900 pid_to_ptid (GET_LWP (lp->ptid)),
1901 step, TARGET_SIGNAL_0);
25289eb2
PA
1902 lp->stopped = 0;
1903 lp->step = step;
1904 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1905 lp->stopped_by_watchpoint = 0;
1906 }
1907 else
1908 {
1909 if (debug_linux_nat)
1910 fprintf_unfiltered (gdb_stdlog,
1911 "RC: Not resuming sibling %s (has pending)\n",
1912 target_pid_to_str (lp->ptid));
1913 }
6c95b8df 1914 }
25289eb2 1915 else
d6b0e80f 1916 {
d90e17a7
PA
1917 if (debug_linux_nat)
1918 fprintf_unfiltered (gdb_stdlog,
25289eb2 1919 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1920 target_pid_to_str (lp->ptid));
d6b0e80f 1921 }
25289eb2 1922}
d6b0e80f 1923
25289eb2
PA
1924static int
1925resume_callback (struct lwp_info *lp, void *data)
1926{
1927 resume_lwp (lp, 0);
d6b0e80f
AC
1928 return 0;
1929}
1930
1931static int
1932resume_clear_callback (struct lwp_info *lp, void *data)
1933{
1934 lp->resumed = 0;
25289eb2 1935 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1936 return 0;
1937}
1938
1939static int
1940resume_set_callback (struct lwp_info *lp, void *data)
1941{
1942 lp->resumed = 1;
25289eb2 1943 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1944 return 0;
1945}
1946
1947static void
28439f5e
PA
1948linux_nat_resume (struct target_ops *ops,
1949 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1950{
7feb7d06 1951 sigset_t prev_mask;
d6b0e80f 1952 struct lwp_info *lp;
d90e17a7 1953 int resume_many;
d6b0e80f 1954
76f50ad1
DJ
1955 if (debug_linux_nat)
1956 fprintf_unfiltered (gdb_stdlog,
1957 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1958 step ? "step" : "resume",
1959 target_pid_to_str (ptid),
423ec54c
JK
1960 (signo != TARGET_SIGNAL_0
1961 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1962 target_pid_to_str (inferior_ptid));
1963
7feb7d06 1964 block_child_signals (&prev_mask);
b84876c2 1965
d6b0e80f 1966 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1967 resume_many = (ptid_equal (minus_one_ptid, ptid)
1968 || ptid_is_pid (ptid));
4c28f408 1969
e3e9f5a2
PA
1970 /* Mark the lwps we're resuming as resumed. */
1971 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1972
d90e17a7
PA
1973 /* See if it's the current inferior that should be handled
1974 specially. */
1975 if (resume_many)
1976 lp = find_lwp_pid (inferior_ptid);
1977 else
1978 lp = find_lwp_pid (ptid);
9f0bdab8 1979 gdb_assert (lp != NULL);
d6b0e80f 1980
9f0bdab8
DJ
1981 /* Remember if we're stepping. */
1982 lp->step = step;
25289eb2 1983 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1984
9f0bdab8
DJ
1985 /* If we have a pending wait status for this thread, there is no
1986 point in resuming the process. But first make sure that
1987 linux_nat_wait won't preemptively handle the event - we
1988 should never take this short-circuit if we are going to
1989 leave LP running, since we have skipped resuming all the
1990 other threads. This bit of code needs to be synchronized
1991 with linux_nat_wait. */
76f50ad1 1992
9f0bdab8
DJ
1993 if (lp->status && WIFSTOPPED (lp->status))
1994 {
2455069d
UW
1995 if (!lp->step
1996 && WSTOPSIG (lp->status)
1997 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1998 {
9f0bdab8
DJ
1999 if (debug_linux_nat)
2000 fprintf_unfiltered (gdb_stdlog,
2001 "LLR: Not short circuiting for ignored "
2002 "status 0x%x\n", lp->status);
2003
d6b0e80f
AC
2004 /* FIXME: What should we do if we are supposed to continue
2005 this thread with a signal? */
2006 gdb_assert (signo == TARGET_SIGNAL_0);
2455069d 2007 signo = target_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
2008 lp->status = 0;
2009 }
2010 }
76f50ad1 2011
6c95b8df 2012 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
2013 {
2014 /* FIXME: What should we do if we are supposed to continue
2015 this thread with a signal? */
2016 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 2017
9f0bdab8
DJ
2018 if (debug_linux_nat)
2019 fprintf_unfiltered (gdb_stdlog,
2020 "LLR: Short circuiting for status 0x%x\n",
2021 lp->status);
d6b0e80f 2022
7feb7d06
PA
2023 restore_child_signals_mask (&prev_mask);
2024 if (target_can_async_p ())
2025 {
2026 target_async (inferior_event_handler, 0);
2027 /* Tell the event loop we have something to process. */
2028 async_file_mark ();
2029 }
9f0bdab8 2030 return;
d6b0e80f
AC
2031 }
2032
9f0bdab8
DJ
2033 /* Mark LWP as not stopped to prevent it from being continued by
2034 resume_callback. */
2035 lp->stopped = 0;
2036
d90e17a7
PA
2037 if (resume_many)
2038 iterate_over_lwps (ptid, resume_callback, NULL);
2039
2040 /* Convert to something the lower layer understands. */
2041 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 2042
7b50312a
PA
2043 if (linux_nat_prepare_to_resume != NULL)
2044 linux_nat_prepare_to_resume (lp);
28439f5e 2045 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 2046 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 2047 lp->stopped_by_watchpoint = 0;
9f0bdab8 2048
d6b0e80f
AC
2049 if (debug_linux_nat)
2050 fprintf_unfiltered (gdb_stdlog,
2051 "LLR: %s %s, %s (resume event thread)\n",
2052 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2053 target_pid_to_str (ptid),
423ec54c
JK
2054 (signo != TARGET_SIGNAL_0
2055 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 2056
7feb7d06 2057 restore_child_signals_mask (&prev_mask);
b84876c2 2058 if (target_can_async_p ())
8ea051c5 2059 target_async (inferior_event_handler, 0);
d6b0e80f
AC
2060}
2061
c5f62d5f 2062/* Send a signal to an LWP. */
d6b0e80f
AC
2063
2064static int
2065kill_lwp (int lwpid, int signo)
2066{
c5f62d5f
DE
2067 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2068 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2069
2070#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2071 {
2072 static int tkill_failed;
2073
2074 if (!tkill_failed)
2075 {
2076 int ret;
2077
2078 errno = 0;
2079 ret = syscall (__NR_tkill, lwpid, signo);
2080 if (errno != ENOSYS)
2081 return ret;
2082 tkill_failed = 1;
2083 }
2084 }
d6b0e80f
AC
2085#endif
2086
2087 return kill (lwpid, signo);
2088}
2089
ca2163eb
PA
2090/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2091 event, check if the core is interested in it: if not, ignore the
2092 event, and keep waiting; otherwise, we need to toggle the LWP's
2093 syscall entry/exit status, since the ptrace event itself doesn't
2094 indicate it, and report the trap to higher layers. */
2095
2096static int
2097linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2098{
2099 struct target_waitstatus *ourstatus = &lp->waitstatus;
2100 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2101 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2102
2103 if (stopping)
2104 {
2105 /* If we're stopping threads, there's a SIGSTOP pending, which
2106 makes it so that the LWP reports an immediate syscall return,
2107 followed by the SIGSTOP. Skip seeing that "return" using
2108 PTRACE_CONT directly, and let stop_wait_callback collect the
2109 SIGSTOP. Later, when the thread is resumed, a new syscall
2110 entry event would be reported. If we didn't do this (and returned 0), we'd
2111 leave a syscall entry pending, and our caller, by using
2112 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2113 itself. Later, when the user re-resumes this LWP, we'd see
2114 another syscall entry event and we'd mistake it for a return.
2115
2116 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2117 (leaving immediately with LWP->signalled set, without issuing
2118 a PTRACE_CONT), it would still be problematic to leave this
2119 syscall enter pending, as later when the thread is resumed,
2120 it would then see the same syscall exit mentioned above,
2121 followed by the delayed SIGSTOP, while the syscall didn't
2122 actually get to execute. It seems it would be even more
2123 confusing to the user. */
2124
2125 if (debug_linux_nat)
2126 fprintf_unfiltered (gdb_stdlog,
2127 "LHST: ignoring syscall %d "
2128 "for LWP %ld (stopping threads), "
2129 "resuming with PTRACE_CONT for SIGSTOP\n",
2130 syscall_number,
2131 GET_LWP (lp->ptid));
2132
2133 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2134 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2135 return 1;
2136 }
2137
2138 if (catch_syscall_enabled ())
2139 {
2140 /* Always update the entry/return state, even if this particular
2141 syscall isn't interesting to the core now. In async mode,
2142 the user could install a new catchpoint for this syscall
2143 between syscall enter/return, and we'll need to know to
2144 report a syscall return if that happens. */
2145 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2146 ? TARGET_WAITKIND_SYSCALL_RETURN
2147 : TARGET_WAITKIND_SYSCALL_ENTRY);
2148
2149 if (catching_syscall_number (syscall_number))
2150 {
2151 /* Alright, an event to report. */
2152 ourstatus->kind = lp->syscall_state;
2153 ourstatus->value.syscall_number = syscall_number;
2154
2155 if (debug_linux_nat)
2156 fprintf_unfiltered (gdb_stdlog,
2157 "LHST: stopping for %s of syscall %d"
2158 " for LWP %ld\n",
3e43a32a
MS
2159 lp->syscall_state
2160 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2161 ? "entry" : "return",
2162 syscall_number,
2163 GET_LWP (lp->ptid));
2164 return 0;
2165 }
2166
2167 if (debug_linux_nat)
2168 fprintf_unfiltered (gdb_stdlog,
2169 "LHST: ignoring %s of syscall %d "
2170 "for LWP %ld\n",
2171 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2172 ? "entry" : "return",
2173 syscall_number,
2174 GET_LWP (lp->ptid));
2175 }
2176 else
2177 {
2178 /* If we had been syscall tracing, and hence used PT_SYSCALL
2179 before on this LWP, it could happen that the user removes all
2180 syscall catchpoints before we get to process this event.
2181 There are two noteworthy issues here:
2182
2183 - When stopped at a syscall entry event, resuming with
2184 PT_STEP still resumes executing the syscall and reports a
2185 syscall return.
2186
2187 - Only PT_SYSCALL catches syscall enters. If we last
2188 single-stepped this thread, then this event can't be a
2189 syscall enter; since the last resume was a single-step, it
2190 has to be a syscall exit.
2191
2192 The points above mean that the next resume, be it PT_STEP or
2193 PT_CONTINUE, can not trigger a syscall trace event. */
2194 if (debug_linux_nat)
2195 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2196 "LHST: caught syscall event "
2197 "with no syscall catchpoints."
ca2163eb
PA
2198 " %d for LWP %ld, ignoring\n",
2199 syscall_number,
2200 GET_LWP (lp->ptid));
2201 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2202 }
2203
2204 /* The core isn't interested in this event. For efficiency, avoid
2205 stopping all threads only to have the core resume them all again.
2206 Since we're not stopping threads, if we're still syscall tracing
2207 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2208 subsequent syscall. Simply resume using the inf-ptrace layer,
2209 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2210
2211 /* Note that gdbarch_get_syscall_number may access registers, hence
2212 fill a regcache. */
2213 registers_changed ();
7b50312a
PA
2214 if (linux_nat_prepare_to_resume != NULL)
2215 linux_nat_prepare_to_resume (lp);
ca2163eb
PA
2216 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2217 lp->step, TARGET_SIGNAL_0);
2218 return 1;
2219}
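/* Editor's sketch (not part of linux-nat.c): a standalone illustration of
   the syscall-trap mechanism handled above.  With PTRACE_O_TRACESYSGOOD
   set, every PTRACE_SYSCALL stop reports WSTOPSIG == (SIGTRAP | 0x80),
   and entry/exit stops simply alternate -- the tracer must track which
   is which itself, just as lp->syscall_state does.  CHILD is assumed to
   be an already-traced, currently-stopped process.  */

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
trace_syscalls_example (pid_t child)
{
  int status;
  int entering = 1;

  ptrace (PTRACE_SETOPTIONS, child, 0, (void *) PTRACE_O_TRACESYSGOOD);

  for (;;)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);	/* Run to the next syscall stop.  */
      if (waitpid (child, &status, 0) == -1 || WIFEXITED (status))
	break;

      if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
	{
	  printf ("syscall %s\n", entering ? "entry" : "exit");
	  entering = !entering;			/* Toggle, like lp->syscall_state.  */
	}
    }
}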
2220
3d799a95
DJ
2221/* Handle a GNU/Linux extended wait response. If we see a clone
2222 event, we need to add the new LWP to our list (and not report the
2223 trap to higher layers). This function returns non-zero if the
2224 event should be ignored and we should wait again. If STOPPING is
2225 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2226
2227static int
3d799a95
DJ
2228linux_handle_extended_wait (struct lwp_info *lp, int status,
2229 int stopping)
d6b0e80f 2230{
3d799a95
DJ
2231 int pid = GET_LWP (lp->ptid);
2232 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2233 int event = status >> 16;
d6b0e80f 2234
3d799a95
DJ
2235 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2236 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2237 {
3d799a95
DJ
2238 unsigned long new_pid;
2239 int ret;
2240
2241 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2242
3d799a95
DJ
2243 /* If we haven't already seen the new PID stop, wait for it now. */
2244 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2245 {
2246 /* The new child has a pending SIGSTOP. We can't affect it until it
2247 hits the SIGSTOP, but we're already attached. */
2248 ret = my_waitpid (new_pid, &status,
2249 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2250 if (ret == -1)
2251 perror_with_name (_("waiting for new child"));
2252 else if (ret != new_pid)
2253 internal_error (__FILE__, __LINE__,
2254 _("wait returned unexpected PID %d"), ret);
2255 else if (!WIFSTOPPED (status))
2256 internal_error (__FILE__, __LINE__,
2257 _("wait returned unexpected status 0x%x"), status);
2258 }
2259
3a3e9ee3 2260 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2261
2277426b
PA
2262 if (event == PTRACE_EVENT_FORK
2263 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2264 {
2277426b
PA
2265 /* Handle checkpointing by linux-fork.c here as a special
2266 case. We don't want the follow-fork-mode or 'catch fork'
2267 to interfere with this. */
2268
2269 /* This won't actually modify the breakpoint list, but will
2270 physically remove the breakpoints from the child. */
2271 detach_breakpoints (new_pid);
2272
2273 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2274 if (!find_fork_pid (new_pid))
2275 add_fork (new_pid);
2277426b
PA
2276
2277 /* Report as spurious, so that infrun doesn't want to follow
2278 this fork. We're actually doing an infcall in
2279 linux-fork.c. */
2280 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2281 linux_enable_event_reporting (pid_to_ptid (new_pid));
2282
2283 /* Report the stop to the core. */
2284 return 0;
2285 }
2286
3d799a95
DJ
2287 if (event == PTRACE_EVENT_FORK)
2288 ourstatus->kind = TARGET_WAITKIND_FORKED;
2289 else if (event == PTRACE_EVENT_VFORK)
2290 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2291 else
3d799a95 2292 {
78768c4a
JK
2293 struct lwp_info *new_lp;
2294
3d799a95 2295 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2296
3c4d7e12
PA
2297 if (debug_linux_nat)
2298 fprintf_unfiltered (gdb_stdlog,
2299 "LHEW: Got clone event "
2300 "from LWP %d, new child is LWP %ld\n",
2301 pid, new_pid);
2302
d90e17a7 2303 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2304 new_lp->cloned = 1;
4c28f408 2305 new_lp->stopped = 1;
d6b0e80f 2306
3d799a95
DJ
2307 if (WSTOPSIG (status) != SIGSTOP)
2308 {
2309 /* This can happen if someone starts sending the new
2310 thread signals with a lower number than SIGSTOP
2311 (e.g. SIGUSR1) before it gets a chance to run.
2312 This is an unlikely case, and harder to handle for
2313 fork / vfork than for clone, so we do not try - but
2314 we handle it for clone events here. We'll send
2315 the other signal on to the thread below. */
2316
2317 new_lp->signalled = 1;
2318 }
2319 else
79395f92
PA
2320 {
2321 struct thread_info *tp;
2322
2323 /* When we stop for an event in some other thread, and
2324 pull the thread list just as this thread has cloned,
2325 we'll have seen the new thread in the thread_db list
2326 before handling the CLONE event (glibc's
2327 pthread_create adds the new thread to the thread list
2328 before clone'ing, and has the kernel fill in the
2329 thread's tid on the clone call with
2330 CLONE_PARENT_SETTID). If that happened, and the core
2331 had requested the new thread to stop, we'll have
2332 killed it with SIGSTOP. But since SIGSTOP is not an
2333 RT signal, it can only be queued once. We need to be
2334 careful to not resume the LWP if we wanted it to
2335 stop. In that case, we'll leave the SIGSTOP pending.
2336 It will later be reported as TARGET_SIGNAL_0. */
2337 tp = find_thread_ptid (new_lp->ptid);
2338 if (tp != NULL && tp->stop_requested)
2339 new_lp->last_resume_kind = resume_stop;
2340 else
2341 status = 0;
2342 }
d6b0e80f 2343
4c28f408 2344 if (non_stop)
3d799a95 2345 {
4c28f408
PA
2346 /* Add the new thread to GDB's lists as soon as possible
2347 so that:
2348
2349 1) the frontend doesn't have to wait for a stop to
2350 display them, and,
2351
2352 2) we tag it with the correct running state. */
2353
2354 /* If the thread_db layer is active, let it know about
2355 this new thread, and add it to GDB's list. */
2356 if (!thread_db_attach_lwp (new_lp->ptid))
2357 {
2358 /* We're not using thread_db. Add it to GDB's
2359 list. */
2360 target_post_attach (GET_LWP (new_lp->ptid));
2361 add_thread (new_lp->ptid);
2362 }
2363
2364 if (!stopping)
2365 {
2366 set_running (new_lp->ptid, 1);
2367 set_executing (new_lp->ptid, 1);
e21ffe51
PA
2368 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2369 resume_stop. */
2370 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2371 }
2372 }
2373
79395f92
PA
2374 if (status != 0)
2375 {
2376 /* We created NEW_LP so it cannot yet contain STATUS. */
2377 gdb_assert (new_lp->status == 0);
2378
2379 /* Save the wait status to report later. */
2380 if (debug_linux_nat)
2381 fprintf_unfiltered (gdb_stdlog,
2382 "LHEW: waitpid of new LWP %ld, "
2383 "saving status %s\n",
2384 (long) GET_LWP (new_lp->ptid),
2385 status_to_str (status));
2386 new_lp->status = status;
2387 }
2388
ca2163eb
PA
2389 /* Note the need to use the low target ops to resume, to
2390 handle resuming with PT_SYSCALL if we have syscall
2391 catchpoints. */
4c28f408
PA
2392 if (!stopping)
2393 {
3d799a95 2394 new_lp->resumed = 1;
ca2163eb 2395
79395f92 2396 if (status == 0)
ad34eb2f 2397 {
e21ffe51 2398 gdb_assert (new_lp->last_resume_kind == resume_continue);
ad34eb2f
JK
2399 if (debug_linux_nat)
2400 fprintf_unfiltered (gdb_stdlog,
79395f92
PA
2401 "LHEW: resuming new LWP %ld\n",
2402 GET_LWP (new_lp->ptid));
7b50312a
PA
2403 if (linux_nat_prepare_to_resume != NULL)
2404 linux_nat_prepare_to_resume (new_lp);
79395f92
PA
2405 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2406 0, TARGET_SIGNAL_0);
2407 new_lp->stopped = 0;
ad34eb2f
JK
2408 }
2409 }
d6b0e80f 2410
3d799a95
DJ
2411 if (debug_linux_nat)
2412 fprintf_unfiltered (gdb_stdlog,
3c4d7e12 2413 "LHEW: resuming parent LWP %d\n", pid);
7b50312a
PA
2414 if (linux_nat_prepare_to_resume != NULL)
2415 linux_nat_prepare_to_resume (lp);
ca2163eb
PA
2416 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2417 0, TARGET_SIGNAL_0);
3d799a95
DJ
2418
2419 return 1;
2420 }
2421
2422 return 0;
d6b0e80f
AC
2423 }
2424
3d799a95
DJ
2425 if (event == PTRACE_EVENT_EXEC)
2426 {
a75724bc
PA
2427 if (debug_linux_nat)
2428 fprintf_unfiltered (gdb_stdlog,
2429 "LHEW: Got exec event from LWP %ld\n",
2430 GET_LWP (lp->ptid));
2431
3d799a95
DJ
2432 ourstatus->kind = TARGET_WAITKIND_EXECD;
2433 ourstatus->value.execd_pathname
6d8fd2b7 2434 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2435
6c95b8df
PA
2436 return 0;
2437 }
2438
2439 if (event == PTRACE_EVENT_VFORK_DONE)
2440 {
2441 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2442 {
6c95b8df 2443 if (debug_linux_nat)
3e43a32a
MS
2444 fprintf_unfiltered (gdb_stdlog,
2445 "LHEW: Got expected PTRACE_EVENT_"
2446 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2447 GET_LWP (lp->ptid));
3d799a95 2448
6c95b8df
PA
2449 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2450 return 0;
3d799a95
DJ
2451 }
2452
6c95b8df 2453 if (debug_linux_nat)
3e43a32a
MS
2454 fprintf_unfiltered (gdb_stdlog,
2455 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2456 "from LWP %ld: resuming\n",
6c95b8df
PA
2457 GET_LWP (lp->ptid));
2458 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2459 return 1;
3d799a95
DJ
2460 }
2461
2462 internal_error (__FILE__, __LINE__,
2463 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2464}
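/* Editor's sketch (not part of linux-nat.c): a standalone illustration of
   the extended-event plumbing above, for the clone case.  With
   PTRACE_O_TRACECLONE set, a clone() in the tracee reports a stop whose
   status carries PTRACE_EVENT_CLONE in bits 16..23, and
   PTRACE_GETEVENTMSG yields the new LWP's tid; the new LWP itself starts
   out ptrace-stopped.  CHILD is assumed to be an already-traced,
   currently-stopped process.  */

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
watch_for_clone_example (pid_t child)
{
  int status;

  ptrace (PTRACE_SETOPTIONS, child, 0, (void *) PTRACE_O_TRACECLONE);

  for (;;)
    {
      ptrace (PTRACE_CONT, child, 0, 0);
      if (waitpid (child, &status, 0) == -1 || WIFEXITED (status))
	break;

      if (WIFSTOPPED (status) && (status >> 16) == PTRACE_EVENT_CLONE)
	{
	  unsigned long new_tid = 0;

	  ptrace (PTRACE_GETEVENTMSG, child, 0, &new_tid);
	  printf ("tracee cloned: new LWP %lu\n", new_tid);
	  /* A real tracer would now waitpid() for NEW_TID to collect its
	     initial stop, as linux_handle_extended_wait does above.  */
	}
    }
}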
2465
432b4d03
JK
2466/* Return non-zero if LWP is a zombie. */
2467
2468static int
2469linux_lwp_is_zombie (long lwp)
2470{
2471 char buffer[MAXPATHLEN];
2472 FILE *procfile;
ea23808b
PA
2473 int retval;
2474 int have_state;
432b4d03 2475
07e78767 2476 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
432b4d03
JK
2477 procfile = fopen (buffer, "r");
2478 if (procfile == NULL)
2479 {
2480 warning (_("unable to open /proc file '%s'"), buffer);
2481 return 0;
2482 }
ea23808b
PA
2483
2484 have_state = 0;
432b4d03 2485 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
ea23808b 2486 if (strncmp (buffer, "State:", 6) == 0)
432b4d03 2487 {
ea23808b 2488 have_state = 1;
432b4d03
JK
2489 break;
2490 }
ea23808b
PA
2491 retval = (have_state
2492 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
432b4d03 2493 fclose (procfile);
432b4d03
JK
2494 return retval;
2495}
2496
d6b0e80f
AC
2497/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2498 exited. */
2499
2500static int
2501wait_lwp (struct lwp_info *lp)
2502{
2503 pid_t pid;
432b4d03 2504 int status = 0;
d6b0e80f 2505 int thread_dead = 0;
432b4d03 2506 sigset_t prev_mask;
d6b0e80f
AC
2507
2508 gdb_assert (!lp->stopped);
2509 gdb_assert (lp->status == 0);
2510
432b4d03
JK
2511 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2512 block_child_signals (&prev_mask);
2513
2514 for (;;)
d6b0e80f 2515 {
432b4d03
JK
2516 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2517 was right and we should just call sigsuspend. */
2518
2519 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
d6b0e80f 2520 if (pid == -1 && errno == ECHILD)
432b4d03 2521 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2522 if (pid == -1 && errno == ECHILD)
2523 {
2524 /* The thread has previously exited. We need to delete it
2525 now because, for some vendor 2.4 kernels with NPTL
2526 support backported, there won't be an exit event unless
2527 it is the main thread. 2.6 kernels will report an exit
2528 event for each thread that exits, as expected. */
2529 thread_dead = 1;
2530 if (debug_linux_nat)
2531 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2532 target_pid_to_str (lp->ptid));
2533 }
432b4d03
JK
2534 if (pid != 0)
2535 break;
2536
2537 /* Bugs 10970, 12702.
2538 Thread group leader may have exited in which case we'll lock up in
2539 waitpid if there are other threads, even if they are all zombies too.
2540 Basically, we're not supposed to use waitpid this way.
2541 __WCLONE is not applicable for the leader so we can't use that.
2542 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2543 process; it gets ESRCH both for the zombie and for running processes.
2544
2545 As a workaround, check if we're waiting for the thread group leader and
2546 if it's a zombie, and avoid calling waitpid if it is.
2547
2548 This is racy, what if the tgl becomes a zombie right after we check?
2549 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2550 a blocking waitpid, but the linux_lwp_is_zombie check is safe this way. */
2551
2552 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2553 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
d6b0e80f 2554 {
d6b0e80f
AC
2555 thread_dead = 1;
2556 if (debug_linux_nat)
432b4d03
JK
2557 fprintf_unfiltered (gdb_stdlog,
2558 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2559 target_pid_to_str (lp->ptid));
432b4d03 2560 break;
d6b0e80f 2561 }
432b4d03
JK
2562
2563 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD handlers
2564 get invoked even though our caller intentionally blocked them with
2565 block_child_signals. This is sensitive only to the loop in
2566 linux_nat_wait_1: if we get called from there, my_waitpid gets called
2567 again before it gets to sigsuspend, so we can safely let the handlers
2568 run here. */
2569
2570 sigsuspend (&suspend_mask);
2571 }
2572
2573 restore_child_signals_mask (&prev_mask);
2574
d6b0e80f
AC
2575 if (!thread_dead)
2576 {
2577 gdb_assert (pid == GET_LWP (lp->ptid));
2578
2579 if (debug_linux_nat)
2580 {
2581 fprintf_unfiltered (gdb_stdlog,
2582 "WL: waitpid %s received %s\n",
2583 target_pid_to_str (lp->ptid),
2584 status_to_str (status));
2585 }
d6b0e80f 2586
a9f4bb21
PA
2587 /* Check if the thread has exited. */
2588 if (WIFEXITED (status) || WIFSIGNALED (status))
2589 {
2590 thread_dead = 1;
2591 if (debug_linux_nat)
2592 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2593 target_pid_to_str (lp->ptid));
2594 }
d6b0e80f
AC
2595 }
2596
2597 if (thread_dead)
2598 {
e26af52f 2599 exit_lwp (lp);
d6b0e80f
AC
2600 return 0;
2601 }
2602
2603 gdb_assert (WIFSTOPPED (status));
2604
ca2163eb
PA
2605 /* Handle GNU/Linux's syscall SIGTRAPs. */
2606 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2607 {
2608 /* No longer need the sysgood bit. The ptrace event ends up
2609 recorded in lp->waitstatus if we care for it. We can carry
2610 on handling the event like a regular SIGTRAP from here
2611 on. */
2612 status = W_STOPCODE (SIGTRAP);
2613 if (linux_handle_syscall_trap (lp, 1))
2614 return wait_lwp (lp);
2615 }
2616
d6b0e80f
AC
2617 /* Handle GNU/Linux's extended waitstatus for trace events. */
2618 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2619 {
2620 if (debug_linux_nat)
2621 fprintf_unfiltered (gdb_stdlog,
2622 "WL: Handling extended status 0x%06x\n",
2623 status);
3d799a95 2624 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2625 return wait_lwp (lp);
2626 }
2627
2628 return status;
2629}
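/* Editor's sketch (not part of linux-nat.c): the race-free wait pattern
   wait_lwp relies on, in standalone form.  With SIGCHLD blocked, poll
   with WNOHANG; if nothing is ready, sigsuspend atomically unblocks
   SIGCHLD and sleeps until it arrives.  A do-nothing handler is needed
   only so the signal interrupts sigsuspend.  */

#include <signal.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
chld_handler_example (int sig)
{
  (void) sig;
}

static int
wait_without_losing_sigchld (pid_t pid)
{
  struct sigaction sa;
  sigset_t block, suspend, prev;
  int status = 0;

  sa.sa_handler = chld_handler_example;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = 0;
  sigaction (SIGCHLD, &sa, NULL);

  sigemptyset (&block);
  sigaddset (&block, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block, &prev);	/* Close the race window.  */

  suspend = prev;
  sigdelset (&suspend, SIGCHLD);		/* Mask used while sleeping.  */

  while (waitpid (pid, &status, WNOHANG) == 0)
    sigsuspend (&suspend);			/* Wake on the next SIGCHLD.  */

  sigprocmask (SIG_SETMASK, &prev, NULL);
  return status;
}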
2630
9f0bdab8
DJ
2631/* Save the most recent siginfo for LP. This is currently only called
2632 for SIGTRAP; some ports use the si_addr field for
2633 target_stopped_data_address. In the future, it may also be used to
2634 restore the siginfo of requeued signals. */
2635
2636static void
2637save_siginfo (struct lwp_info *lp)
2638{
2639 errno = 0;
2640 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2641 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2642
2643 if (errno != 0)
2644 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2645}
2646
d6b0e80f
AC
2647/* Send a SIGSTOP to LP. */
2648
2649static int
2650stop_callback (struct lwp_info *lp, void *data)
2651{
2652 if (!lp->stopped && !lp->signalled)
2653 {
2654 int ret;
2655
2656 if (debug_linux_nat)
2657 {
2658 fprintf_unfiltered (gdb_stdlog,
2659 "SC: kill %s **<SIGSTOP>**\n",
2660 target_pid_to_str (lp->ptid));
2661 }
2662 errno = 0;
2663 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2664 if (debug_linux_nat)
2665 {
2666 fprintf_unfiltered (gdb_stdlog,
2667 "SC: lwp kill %d %s\n",
2668 ret,
2669 errno ? safe_strerror (errno) : "ERRNO-OK");
2670 }
2671
2672 lp->signalled = 1;
2673 gdb_assert (lp->status == 0);
2674 }
2675
2676 return 0;
2677}
2678
7b50312a
PA
2679/* Request a stop on LWP. */
2680
2681void
2682linux_stop_lwp (struct lwp_info *lwp)
2683{
2684 stop_callback (lwp, NULL);
2685}
2686
57380f4e 2687/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2688
2689static int
57380f4e
DJ
2690linux_nat_has_pending_sigint (int pid)
2691{
2692 sigset_t pending, blocked, ignored;
57380f4e
DJ
2693
2694 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2695
2696 if (sigismember (&pending, SIGINT)
2697 && !sigismember (&ignored, SIGINT))
2698 return 1;
2699
2700 return 0;
2701}
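/* Editor's sketch (not part of linux-nat.c): what linux_proc_pending_signals
   boils down to for the check above.  /proc/PID/status reports pending
   signals as hex bitmaps on the "SigPnd:" (per-thread) and "ShdPnd:"
   (process-wide) lines, with bit (SIG - 1) set when SIG is pending.  The
   helper name is made up for the example.  */

#include <signal.h>
#include <stdio.h>
#include <string.h>

static int
proc_has_pending_signal_example (int pid, int sig)
{
  char path[64], line[256];
  unsigned long long pending = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0
	|| strncmp (line, "ShdPnd:", 7) == 0)
      {
	unsigned long long mask = 0;

	sscanf (line + 7, "%llx", &mask);
	pending |= mask;
      }

  fclose (f);
  return (pending & (1ULL << (sig - 1))) != 0;
}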
2702
2703/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2704
2705static int
2706set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2707{
57380f4e
DJ
2708 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2709 flag to consume the next one. */
2710 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2711 && WSTOPSIG (lp->status) == SIGINT)
2712 lp->status = 0;
2713 else
2714 lp->ignore_sigint = 1;
2715
2716 return 0;
2717}
2718
2719/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2720 This function is called after we know the LWP has stopped; if the LWP
2721 stopped before the expected SIGINT was delivered, then it will never have
2722 arrived. Also, if the signal was delivered to a shared queue and consumed
2723 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2724
57380f4e
DJ
2725static void
2726maybe_clear_ignore_sigint (struct lwp_info *lp)
2727{
2728 if (!lp->ignore_sigint)
2729 return;
2730
2731 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2732 {
2733 if (debug_linux_nat)
2734 fprintf_unfiltered (gdb_stdlog,
2735 "MCIS: Clearing bogus flag for %s\n",
2736 target_pid_to_str (lp->ptid));
2737 lp->ignore_sigint = 0;
2738 }
2739}
2740
ebec9a0f
PA
2741/* Fetch the possible triggered data watchpoint info and store it in
2742 LP.
2743
2744 On some archs, like x86, that use debug registers to set
2745 watchpoints, it's possible that the way to know which watched
2746 address trapped, is to check the register that is used to select
2747 which address to watch. Problem is, between setting the watchpoint
2748 and reading back which data address trapped, the user may change
2749 the set of watchpoints, and, as a consequence, GDB changes the
2750 debug registers in the inferior. To avoid reading back a stale
2751 stopped-data-address when that happens, we cache in LP the fact
2752 that a watchpoint trapped, and the corresponding data address, as
2753 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2754 registers meanwhile, we have the cached data we can rely on. */
2755
2756static void
2757save_sigtrap (struct lwp_info *lp)
2758{
2759 struct cleanup *old_chain;
2760
2761 if (linux_ops->to_stopped_by_watchpoint == NULL)
2762 {
2763 lp->stopped_by_watchpoint = 0;
2764 return;
2765 }
2766
2767 old_chain = save_inferior_ptid ();
2768 inferior_ptid = lp->ptid;
2769
2770 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2771
2772 if (lp->stopped_by_watchpoint)
2773 {
2774 if (linux_ops->to_stopped_data_address != NULL)
2775 lp->stopped_data_address_p =
2776 linux_ops->to_stopped_data_address (&current_target,
2777 &lp->stopped_data_address);
2778 else
2779 lp->stopped_data_address_p = 0;
2780 }
2781
2782 do_cleanups (old_chain);
2783}
2784
2785/* See save_sigtrap. */
2786
2787static int
2788linux_nat_stopped_by_watchpoint (void)
2789{
2790 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2791
2792 gdb_assert (lp != NULL);
2793
2794 return lp->stopped_by_watchpoint;
2795}
2796
2797static int
2798linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2799{
2800 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2801
2802 gdb_assert (lp != NULL);
2803
2804 *addr_p = lp->stopped_data_address;
2805
2806 return lp->stopped_data_address_p;
2807}
2808
26ab7092
JK
2809/* Commonly, any breakpoint or watchpoint generates only SIGTRAP. */
2810
2811static int
2812sigtrap_is_event (int status)
2813{
2814 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2815}
2816
2817/* SIGTRAP-like events recognizer. */
2818
2819static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2820
00390b84
JK
2821/* Check for SIGTRAP-like events in LP. */
2822
2823static int
2824linux_nat_lp_status_is_event (struct lwp_info *lp)
2825{
2826 /* We check for lp->waitstatus in addition to lp->status, because we can
2827 have pending process exits recorded in lp->status
2828 and W_EXITCODE(0,0) == 0. We should probably have an additional
2829 lp->status_p flag. */
2830
2831 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2832 && linux_nat_status_is_event (lp->status));
2833}
2834
26ab7092
JK
2835/* Set an alternative recognizer for SIGTRAP-like events. If
2836 breakpoint_inserted_here_p finds a breakpoint at the stop address,
2837 then gdbarch_decr_pc_after_break will be applied. */
2838
2839void
2840linux_nat_set_status_is_event (struct target_ops *t,
2841 int (*status_is_event) (int status))
2842{
2843 linux_nat_status_is_event = status_is_event;
2844}
2845
57380f4e
DJ
2846/* Wait until LP is stopped. */
2847
2848static int
2849stop_wait_callback (struct lwp_info *lp, void *data)
2850{
6c95b8df
PA
2851 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2852
2853 /* If this is a vfork parent, bail out, it is not going to report
2854 any SIGSTOP until the vfork is done with. */
2855 if (inf->vfork_child != NULL)
2856 return 0;
2857
d6b0e80f
AC
2858 if (!lp->stopped)
2859 {
2860 int status;
2861
2862 status = wait_lwp (lp);
2863 if (status == 0)
2864 return 0;
2865
57380f4e
DJ
2866 if (lp->ignore_sigint && WIFSTOPPED (status)
2867 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2868 {
57380f4e 2869 lp->ignore_sigint = 0;
d6b0e80f
AC
2870
2871 errno = 0;
2872 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2873 if (debug_linux_nat)
2874 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2875 "PTRACE_CONT %s, 0, 0 (%s) "
2876 "(discarding SIGINT)\n",
d6b0e80f
AC
2877 target_pid_to_str (lp->ptid),
2878 errno ? safe_strerror (errno) : "OK");
2879
57380f4e 2880 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2881 }
2882
57380f4e
DJ
2883 maybe_clear_ignore_sigint (lp);
2884
d6b0e80f
AC
2885 if (WSTOPSIG (status) != SIGSTOP)
2886 {
26ab7092 2887 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2888 {
2889 /* If a LWP other than the LWP that we're reporting an
2890 event for has hit a GDB breakpoint (as opposed to
2891 some random trap signal), then just arrange for it to
2892 hit it again later. We don't keep the SIGTRAP status
2893 and don't forward the SIGTRAP signal to the LWP. We
2894 will handle the current event, eventually we will
2895 resume all LWPs, and this one will get its breakpoint
2896 trap again.
2897
2898 If we do not do this, then we run the risk that the
2899 user will delete or disable the breakpoint, but the
2900 thread will have already tripped on it. */
2901
9f0bdab8
DJ
2902 /* Save the trap's siginfo in case we need it later. */
2903 save_siginfo (lp);
2904
ebec9a0f
PA
2905 save_sigtrap (lp);
2906
1777feb0 2907 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2908 errno = 0;
2909 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2910 if (debug_linux_nat)
2911 {
2912 fprintf_unfiltered (gdb_stdlog,
2913 "PTRACE_CONT %s, 0, 0 (%s)\n",
2914 target_pid_to_str (lp->ptid),
2915 errno ? safe_strerror (errno) : "OK");
2916
2917 fprintf_unfiltered (gdb_stdlog,
2918 "SWC: Candidate SIGTRAP event in %s\n",
2919 target_pid_to_str (lp->ptid));
2920 }
710151dd 2921 /* Hold this event/waitstatus while we check to see if
1777feb0 2922 there are any more (we still want to get that SIGSTOP). */
57380f4e 2923 stop_wait_callback (lp, NULL);
710151dd 2924
7feb7d06
PA
2925 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2926 there's another event, throw it back into the
1777feb0 2927 queue. */
7feb7d06 2928 if (lp->status)
710151dd 2929 {
7feb7d06
PA
2930 if (debug_linux_nat)
2931 fprintf_unfiltered (gdb_stdlog,
2932 "SWC: kill %s, %s\n",
2933 target_pid_to_str (lp->ptid),
2934 status_to_str ((int) status));
2935 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2936 }
7feb7d06 2937
1777feb0 2938 /* Save the sigtrap event. */
7feb7d06 2939 lp->status = status;
d6b0e80f
AC
2940 return 0;
2941 }
2942 else
2943 {
2944 /* The thread was stopped with a signal other than
1777feb0 2945 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2946
2947 if (debug_linux_nat)
2948 {
2949 fprintf_unfiltered (gdb_stdlog,
2950 "SWC: Pending event %s in %s\n",
2951 status_to_str ((int) status),
2952 target_pid_to_str (lp->ptid));
2953 }
1777feb0 2954 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2955 errno = 0;
2956 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2957 if (debug_linux_nat)
2958 fprintf_unfiltered (gdb_stdlog,
2959 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2960 target_pid_to_str (lp->ptid),
2961 errno ? safe_strerror (errno) : "OK");
2962
2963 /* Hold this event/waitstatus while we check to see if
1777feb0 2964 there are any more (we still want to get that SIGSTOP). */
57380f4e 2965 stop_wait_callback (lp, NULL);
710151dd
PA
2966
2967 /* If the lp->status field is still empty, use it to
2968 hold this event. If not, then this event must be
2969 returned to the event queue of the LWP. */
7feb7d06 2970 if (lp->status)
d6b0e80f
AC
2971 {
2972 if (debug_linux_nat)
2973 {
2974 fprintf_unfiltered (gdb_stdlog,
2975 "SWC: kill %s, %s\n",
2976 target_pid_to_str (lp->ptid),
2977 status_to_str ((int) status));
2978 }
2979 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2980 }
710151dd
PA
2981 else
2982 lp->status = status;
d6b0e80f
AC
2983 return 0;
2984 }
2985 }
2986 else
2987 {
2988 /* We caught the SIGSTOP that we intended to catch, so
2989 there's no SIGSTOP pending. */
2990 lp->stopped = 1;
2991 lp->signalled = 0;
2992 }
2993 }
2994
2995 return 0;
2996}
2997
d6b0e80f
AC
2998/* Return non-zero if LP has a wait status pending. */
2999
3000static int
3001status_callback (struct lwp_info *lp, void *data)
3002{
3003 /* Only report a pending wait status if we pretend that this has
3004 indeed been resumed. */
ca2163eb
PA
3005 if (!lp->resumed)
3006 return 0;
3007
3008 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3009 {
3010 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 3011 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
3012 0', so a clean process exit can not be stored pending in
3013 lp->status, it is indistinguishable from
3014 no-pending-status. */
3015 return 1;
3016 }
3017
3018 if (lp->status != 0)
3019 return 1;
3020
3021 return 0;
d6b0e80f
AC
3022}
3023
3024/* Return non-zero if LP isn't stopped. */
3025
3026static int
3027running_callback (struct lwp_info *lp, void *data)
3028{
25289eb2
PA
3029 return (!lp->stopped
3030 || ((lp->status != 0
3031 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3032 && lp->resumed));
d6b0e80f
AC
3033}
3034
3035/* Count the LWP's that have had events. */
3036
3037static int
3038count_events_callback (struct lwp_info *lp, void *data)
3039{
3040 int *count = data;
3041
3042 gdb_assert (count != NULL);
3043
e09490f1 3044 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 3045 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
3046 (*count)++;
3047
3048 return 0;
3049}
3050
3051/* Select the LWP (if any) that is currently being single-stepped. */
3052
3053static int
3054select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3055{
25289eb2
PA
3056 if (lp->last_resume_kind == resume_step
3057 && lp->status != 0)
d6b0e80f
AC
3058 return 1;
3059 else
3060 return 0;
3061}
3062
3063/* Select the Nth LWP that has had a SIGTRAP event. */
3064
3065static int
3066select_event_lwp_callback (struct lwp_info *lp, void *data)
3067{
3068 int *selector = data;
3069
3070 gdb_assert (selector != NULL);
3071
1777feb0 3072 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 3073 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
3074 if ((*selector)-- == 0)
3075 return 1;
3076
3077 return 0;
3078}
3079
710151dd
PA
3080static int
3081cancel_breakpoint (struct lwp_info *lp)
3082{
3083 /* Arrange for a breakpoint to be hit again later. We don't keep
3084 the SIGTRAP status and don't forward the SIGTRAP signal to the
3085 LWP. We will handle the current event, eventually we will resume
3086 this LWP, and this breakpoint will trap again.
3087
3088 If we do not do this, then we run the risk that the user will
3089 delete or disable the breakpoint, but the LWP will have already
3090 tripped on it. */
3091
515630c5
UW
3092 struct regcache *regcache = get_thread_regcache (lp->ptid);
3093 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3094 CORE_ADDR pc;
3095
3096 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 3097 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
3098 {
3099 if (debug_linux_nat)
3100 fprintf_unfiltered (gdb_stdlog,
3101 "CB: Push back breakpoint for %s\n",
3102 target_pid_to_str (lp->ptid));
3103
3104 /* Back up the PC if necessary. */
515630c5
UW
3105 if (gdbarch_decr_pc_after_break (gdbarch))
3106 regcache_write_pc (regcache, pc);
3107
710151dd
PA
3108 return 1;
3109 }
3110 return 0;
3111}
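/* Editor's sketch (not part of linux-nat.c, x86-64 only): the "back up
   the PC" step above in raw ptrace terms.  After the int3 (0xCC) of a
   software breakpoint traps, the reported RIP points one byte past the
   breakpoint address (gdbarch_decr_pc_after_break is 1 on x86), so a
   tracer that wants the original instruction re-executed later rewinds
   RIP.  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static void
rewind_pc_after_int3 (pid_t lwp)
{
  struct user_regs_struct regs;

  ptrace (PTRACE_GETREGS, lwp, 0, &regs);
  regs.rip -= 1;			/* Point back at the 0xCC byte.  */
  ptrace (PTRACE_SETREGS, lwp, 0, &regs);
}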
3112
d6b0e80f
AC
3113static int
3114cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3115{
3116 struct lwp_info *event_lp = data;
3117
3118 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3119 if (lp == event_lp)
3120 return 0;
3121
3122 /* If a LWP other than the LWP that we're reporting an event for has
3123 hit a GDB breakpoint (as opposed to some random trap signal),
3124 then just arrange for it to hit it again later. We don't keep
3125 the SIGTRAP status and don't forward the SIGTRAP signal to the
3126 LWP. We will handle the current event, eventually we will resume
3127 all LWPs, and this one will get its breakpoint trap again.
3128
3129 If we do not do this, then we run the risk that the user will
3130 delete or disable the breakpoint, but the LWP will have already
3131 tripped on it. */
3132
00390b84 3133 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
3134 && cancel_breakpoint (lp))
3135 /* Throw away the SIGTRAP. */
3136 lp->status = 0;
d6b0e80f
AC
3137
3138 return 0;
3139}
3140
3141/* Select one LWP out of those that have events pending. */
3142
3143static void
d90e17a7 3144select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
3145{
3146 int num_events = 0;
3147 int random_selector;
3148 struct lwp_info *event_lp;
3149
ac264b3b 3150 /* Record the wait status for the original LWP. */
d6b0e80f
AC
3151 (*orig_lp)->status = *status;
3152
3153 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
3154 event_lp = iterate_over_lwps (filter,
3155 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
3156 if (event_lp != NULL)
3157 {
3158 if (debug_linux_nat)
3159 fprintf_unfiltered (gdb_stdlog,
3160 "SEL: Select single-step %s\n",
3161 target_pid_to_str (event_lp->ptid));
3162 }
3163 else
3164 {
3165 /* No single-stepping LWP. Select one at random, out of those
3166 which have had SIGTRAP events. */
3167
3168 /* First see how many SIGTRAP events we have. */
d90e17a7 3169 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
3170
3171 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3172 random_selector = (int)
3173 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3174
3175 if (debug_linux_nat && num_events > 1)
3176 fprintf_unfiltered (gdb_stdlog,
3177 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3178 num_events, random_selector);
3179
d90e17a7
PA
3180 event_lp = iterate_over_lwps (filter,
3181 select_event_lwp_callback,
d6b0e80f
AC
3182 &random_selector);
3183 }
3184
3185 if (event_lp != NULL)
3186 {
3187 /* Switch the event LWP. */
3188 *orig_lp = event_lp;
3189 *status = event_lp->status;
3190 }
3191
3192 /* Flush the wait status for the event LWP. */
3193 (*orig_lp)->status = 0;
3194}
3195
3196/* Return non-zero if LP has been resumed. */
3197
3198static int
3199resumed_callback (struct lwp_info *lp, void *data)
3200{
3201 return lp->resumed;
3202}
3203
12d9289a
PA
3204/* Stop an active thread, verify it still exists, then resume it. If
3205 the thread ends up with a pending status, then it is not resumed,
3206 and *DATA (really a pointer to int), is set. */
d6b0e80f
AC
3207
3208static int
3209stop_and_resume_callback (struct lwp_info *lp, void *data)
3210{
12d9289a
PA
3211 int *new_pending_p = data;
3212
25289eb2 3213 if (!lp->stopped)
d6b0e80f 3214 {
25289eb2
PA
3215 ptid_t ptid = lp->ptid;
3216
d6b0e80f
AC
3217 stop_callback (lp, NULL);
3218 stop_wait_callback (lp, NULL);
25289eb2
PA
3219
3220 /* Resume if the lwp still exists, and the core wanted it
3221 running. */
12d9289a
PA
3222 lp = find_lwp_pid (ptid);
3223 if (lp != NULL)
25289eb2 3224 {
12d9289a
PA
3225 if (lp->last_resume_kind == resume_stop
3226 && lp->status == 0)
3227 {
3228 /* The core wanted the LWP to stop. Even if it stopped
3229 cleanly (with SIGSTOP), leave the event pending. */
3230 if (debug_linux_nat)
3231 fprintf_unfiltered (gdb_stdlog,
3232 "SARC: core wanted LWP %ld stopped "
3233 "(leaving SIGSTOP pending)\n",
3234 GET_LWP (lp->ptid));
3235 lp->status = W_STOPCODE (SIGSTOP);
3236 }
3237
3238 if (lp->status == 0)
3239 {
3240 if (debug_linux_nat)
3241 fprintf_unfiltered (gdb_stdlog,
3242 "SARC: re-resuming LWP %ld\n",
3243 GET_LWP (lp->ptid));
3244 resume_lwp (lp, lp->step);
3245 }
3246 else
3247 {
3248 if (debug_linux_nat)
3249 fprintf_unfiltered (gdb_stdlog,
3250 "SARC: not re-resuming LWP %ld "
3251 "(has pending)\n",
3252 GET_LWP (lp->ptid));
3253 if (new_pending_p)
3254 *new_pending_p = 1;
3255 }
25289eb2 3256 }
d6b0e80f
AC
3257 }
3258 return 0;
3259}
3260
02f3fc28 3261/* Check if we should go on and pass this event to common code.
12d9289a
PA
3262 Return the affected lwp if we are, or NULL otherwise. If we stop
3263 all lwps temporarily, we may end up with new pending events in some
3264 other lwp. In that case set *NEW_PENDING_P to true. */
3265
02f3fc28 3266static struct lwp_info *
0e5bf2a8 3267linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
02f3fc28
PA
3268{
3269 struct lwp_info *lp;
3270
12d9289a
PA
3271 *new_pending_p = 0;
3272
02f3fc28
PA
3273 lp = find_lwp_pid (pid_to_ptid (lwpid));
3274
3275 /* Check for stop events reported by a process we didn't already
3276 know about - anything not already in our LWP list.
3277
3278 If we're expecting to receive stopped processes after
3279 fork, vfork, and clone events, then we'll just add the
3280 new one to our list and go back to waiting for the event
3281 to be reported - the stopped process might be returned
0e5bf2a8
PA
3282 from waitpid before or after the event is.
3283
3284 But note the case of a non-leader thread exec'ing after the
3285 leader having exited, and gone from our lists. The non-leader
3286 thread changes its tid to the tgid. */
3287
3288 if (WIFSTOPPED (status) && lp == NULL
3289 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3290 {
3291 /* A multi-thread exec after we had seen the leader exiting. */
3292 if (debug_linux_nat)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "LLW: Re-adding thread group leader LWP %d.\n",
3295 lwpid);
3296
3297 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3298 lp->stopped = 1;
3299 lp->resumed = 1;
3300 add_thread (lp->ptid);
3301 }
3302
02f3fc28
PA
3303 if (WIFSTOPPED (status) && !lp)
3304 {
84636d28 3305 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3306 return NULL;
3307 }
3308
3309 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3310 our list, i.e. not part of the current process. This can happen
fd62cb89 3311 if we detach from a program we originally forked and then it
02f3fc28
PA
3312 exits. */
3313 if (!WIFSTOPPED (status) && !lp)
3314 return NULL;
3315
ca2163eb
PA
3316 /* Handle GNU/Linux's syscall SIGTRAPs. */
3317 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3318 {
3319 /* No longer need the sysgood bit. The ptrace event ends up
3320 recorded in lp->waitstatus if we care for it. We can carry
3321 on handling the event like a regular SIGTRAP from here
3322 on. */
3323 status = W_STOPCODE (SIGTRAP);
3324 if (linux_handle_syscall_trap (lp, 0))
3325 return NULL;
3326 }
02f3fc28 3327
ca2163eb
PA
3328 /* Handle GNU/Linux's extended waitstatus for trace events. */
3329 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3330 {
3331 if (debug_linux_nat)
3332 fprintf_unfiltered (gdb_stdlog,
3333 "LLW: Handling extended status 0x%06x\n",
3334 status);
3335 if (linux_handle_extended_wait (lp, status, 0))
3336 return NULL;
3337 }
3338
26ab7092 3339 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3340 {
3341 /* Save the trap's siginfo in case we need it later. */
3342 save_siginfo (lp);
3343
3344 save_sigtrap (lp);
3345 }
ca2163eb 3346
02f3fc28 3347 /* Check if the thread has exited. */
d90e17a7
PA
3348 if ((WIFEXITED (status) || WIFSIGNALED (status))
3349 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3350 {
9db03742
JB
3351 /* If this is the main thread, we must stop all threads and verify
3352 if they are still alive. This is because in the nptl thread model
3353 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3354 other than the main thread. We only get the main thread exit
3355 signal once all child threads have already exited. If we
3356 stop all the threads and use the stop_wait_callback to check
3357 if they have exited we can determine whether this signal
3358 should be ignored or whether it means the end of the debugged
3359 application, regardless of which threading model is being
5d3b6af6 3360 used. */
02f3fc28
PA
3361 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3362 {
3363 lp->stopped = 1;
d90e17a7 3364 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
12d9289a 3365 stop_and_resume_callback, new_pending_p);
02f3fc28
PA
3366 }
3367
3368 if (debug_linux_nat)
3369 fprintf_unfiltered (gdb_stdlog,
3370 "LLW: %s exited.\n",
3371 target_pid_to_str (lp->ptid));
3372
d90e17a7 3373 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3374 {
3375 /* If there is at least one more LWP, then the exit signal
3376 was not the end of the debugged application and should be
3377 ignored. */
3378 exit_lwp (lp);
3379 return NULL;
3380 }
02f3fc28
PA
3381 }
3382
3383 /* Check if the current LWP has previously exited. In the nptl
3384 thread model, LWPs other than the main thread do not issue
3385 signals when they exit so we must check whenever the thread has
3386 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3387 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3388 {
d90e17a7
PA
3389 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3390
02f3fc28
PA
3391 if (debug_linux_nat)
3392 fprintf_unfiltered (gdb_stdlog,
3393 "LLW: %s exited.\n",
3394 target_pid_to_str (lp->ptid));
3395
3396 exit_lwp (lp);
3397
3398 /* Make sure there is at least one thread running. */
d90e17a7 3399 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3400
3401 /* Discard the event. */
3402 return NULL;
3403 }
3404
3405 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3406 an attempt to stop an LWP. */
3407 if (lp->signalled
3408 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3409 {
3410 if (debug_linux_nat)
3411 fprintf_unfiltered (gdb_stdlog,
3412 "LLW: Delayed SIGSTOP caught for %s.\n",
3413 target_pid_to_str (lp->ptid));
3414
02f3fc28
PA
3415 lp->signalled = 0;
3416
25289eb2
PA
3417 if (lp->last_resume_kind != resume_stop)
3418 {
3419 /* This is a delayed SIGSTOP. */
02f3fc28 3420
25289eb2
PA
3421 registers_changed ();
3422
7b50312a
PA
3423 if (linux_nat_prepare_to_resume != NULL)
3424 linux_nat_prepare_to_resume (lp);
25289eb2 3425 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28 3426 lp->step, TARGET_SIGNAL_0);
25289eb2
PA
3427 if (debug_linux_nat)
3428 fprintf_unfiltered (gdb_stdlog,
3429 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3430 lp->step ?
3431 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3432 target_pid_to_str (lp->ptid));
02f3fc28 3433
25289eb2
PA
3434 lp->stopped = 0;
3435 gdb_assert (lp->resumed);
02f3fc28 3436
25289eb2
PA
3437 /* Discard the event. */
3438 return NULL;
3439 }
02f3fc28
PA
3440 }
3441
57380f4e
DJ
3442 /* Make sure we don't report a SIGINT that we have already displayed
3443 for another thread. */
3444 if (lp->ignore_sigint
3445 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3446 {
3447 if (debug_linux_nat)
3448 fprintf_unfiltered (gdb_stdlog,
3449 "LLW: Delayed SIGINT caught for %s.\n",
3450 target_pid_to_str (lp->ptid));
3451
3452 /* This is a delayed SIGINT. */
3453 lp->ignore_sigint = 0;
3454
3455 registers_changed ();
7b50312a
PA
3456 if (linux_nat_prepare_to_resume != NULL)
3457 linux_nat_prepare_to_resume (lp);
28439f5e 3458 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3459 lp->step, TARGET_SIGNAL_0);
3460 if (debug_linux_nat)
3461 fprintf_unfiltered (gdb_stdlog,
3462 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3463 lp->step ?
3464 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3465 target_pid_to_str (lp->ptid));
3466
3467 lp->stopped = 0;
3468 gdb_assert (lp->resumed);
3469
3470 /* Discard the event. */
3471 return NULL;
3472 }
3473
02f3fc28
PA
3474 /* An interesting event. */
3475 gdb_assert (lp);
ca2163eb 3476 lp->status = status;
02f3fc28
PA
3477 return lp;
3478}
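
/* Illustrative sketch, not part of the original file: how an extended
   ptrace wait status is laid out.  With PTRACE_O_TRACEEXEC in effect,
   an exec stop is reported as a SIGTRAP stop with the ptrace event
   number packed above the signal bits, i.e.

     status == (PTRACE_EVENT_EXEC << 16) | (SIGTRAP << 8) | 0x7f

   so WIFSTOPPED (status) and WSTOPSIG (status) == SIGTRAP hold, and
   "status >> 16" recovers the event, which is what the checks in
   linux_nat_filter_event above rely on.  */

static int
example_is_exec_event (int status)
{
  return (WIFSTOPPED (status)
          && WSTOPSIG (status) == SIGTRAP
          && (status >> 16) == PTRACE_EVENT_EXEC);
}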
3479
0e5bf2a8
PA
3480/* Detect zombie thread group leaders, and "exit" them. We can't reap
3481 their exits until all other threads in the group have exited. */
3482
3483static void
3484check_zombie_leaders (void)
3485{
3486 struct inferior *inf;
3487
3488 ALL_INFERIORS (inf)
3489 {
3490 struct lwp_info *leader_lp;
3491
3492 if (inf->pid == 0)
3493 continue;
3494
3495 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3496 if (leader_lp != NULL
3497 /* Check if there are other threads in the group, as we may
3498 have raced with the inferior simply exiting. */
3499 && num_lwps (inf->pid) > 1
3500 && linux_lwp_is_zombie (inf->pid))
3501 {
3502 if (debug_linux_nat)
3503 fprintf_unfiltered (gdb_stdlog,
3504 "CZL: Thread group leader %d zombie "
3505 "(it exited, or another thread execd).\n",
3506 inf->pid);
3507
3508 /* A leader zombie can mean one of two things:
3509
3510 - It exited, and there's an exit status pending
3511 available, or only the leader exited (not the whole
3512 program). In the latter case, we can't waitpid the
3513 leader's exit status until all other threads are gone.
3514
3515 - There are 3 or more threads in the group, and a thread
3516 other than the leader exec'd. On an exec, the Linux
3517 kernel destroys all other threads (except the execing
3518 one) in the thread group, and resets the execing thread's
3519 tid to the tgid. No exit notification is sent for the
3520 execing thread -- from the ptracer's perspective, it
3521 appears as though the execing thread just vanishes.
3522 Until we reap all other threads except the leader and the
3523 execing thread, the leader will be zombie, and the
3524 execing thread will be in `D (disc sleep)'. As soon as
3525 all other threads are reaped, the execing thread changes
3526 its tid to the tgid, and the previous (zombie) leader
3527 vanishes, giving place to the "new" leader. We could try
3528 distinguishing the exit and exec cases, by waiting once
3529 more, and seeing if something comes out, but it doesn't
3530 sound useful. The previous leader _does_ go away, and
3531 we'll re-add the new one once we see the exec event
3532 (which is just the same as what would happen if the
3533 previous leader did exit voluntarily before some other
3534 thread execs). */
3535
3536 if (debug_linux_nat)
3537 fprintf_unfiltered (gdb_stdlog,
3538 "CZL: Thread group leader %d vanished.\n",
3539 inf->pid);
3540 exit_lwp (leader_lp);
3541 }
3542 }
3543}
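
/* Hedged sketch, not part of the original file: the linux_lwp_is_zombie
   check used above is defined earlier in this file; conceptually it only
   needs to read the "State:" line of /proc/PID/status, which reads
   "Z (zombie)" for zombies.  A minimal version could look like this.  */

static int
example_lwp_is_zombie (long lwp)
{
  char buf[100];
  char state = '\0';
  FILE *procfile;

  xsnprintf (buf, sizeof (buf), "/proc/%ld/status", lwp);
  procfile = fopen (buf, "r");
  if (procfile == NULL)
    return 0;

  while (fgets (buf, sizeof (buf), procfile) != NULL)
    if (strncmp (buf, "State:\t", 7) == 0)
      {
        state = buf[7];
        break;
      }

  fclose (procfile);
  return state == 'Z';
}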
3544
d6b0e80f 3545static ptid_t
7feb7d06 3546linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3547 ptid_t ptid, struct target_waitstatus *ourstatus,
3548 int target_options)
d6b0e80f 3549{
7feb7d06 3550 static sigset_t prev_mask;
4b60df3d 3551 enum resume_kind last_resume_kind;
12d9289a 3552 struct lwp_info *lp;
12d9289a 3553 int status;
d6b0e80f 3554
01124a23 3555 if (debug_linux_nat)
b84876c2
PA
3556 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3557
f973ed9c
DJ
3558 /* The first time we get here after starting a new inferior, we may
3559 not have added it to the LWP list yet - this is the earliest
3560 moment at which we know its PID. */
d90e17a7 3561 if (ptid_is_pid (inferior_ptid))
f973ed9c 3562 {
27c9d204
PA
3563 /* Upgrade the main thread's ptid. */
3564 thread_change_ptid (inferior_ptid,
3565 BUILD_LWP (GET_PID (inferior_ptid),
3566 GET_PID (inferior_ptid)));
3567
f973ed9c
DJ
3568 lp = add_lwp (inferior_ptid);
3569 lp->resumed = 1;
3570 }
3571
7feb7d06
PA
3572 /* Make sure SIGCHLD is blocked. */
3573 block_child_signals (&prev_mask);
d6b0e80f
AC
3574
3575retry:
d90e17a7
PA
3576 lp = NULL;
3577 status = 0;
d6b0e80f
AC
3578
3579 /* First check if there is a LWP with a wait status pending. */
0e5bf2a8 3580 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d6b0e80f 3581 {
0e5bf2a8 3582 /* Any LWP in the PTID group that's been resumed will do. */
d90e17a7 3583 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3584 if (lp)
3585 {
ca2163eb 3586 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3587 fprintf_unfiltered (gdb_stdlog,
3588 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3589 status_to_str (lp->status),
d6b0e80f
AC
3590 target_pid_to_str (lp->ptid));
3591 }
d6b0e80f
AC
3592 }
3593 else if (is_lwp (ptid))
3594 {
3595 if (debug_linux_nat)
3596 fprintf_unfiltered (gdb_stdlog,
3597 "LLW: Waiting for specific LWP %s.\n",
3598 target_pid_to_str (ptid));
3599
3600 /* We have a specific LWP to check. */
3601 lp = find_lwp_pid (ptid);
3602 gdb_assert (lp);
d6b0e80f 3603
ca2163eb 3604 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3605 fprintf_unfiltered (gdb_stdlog,
3606 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3607 status_to_str (lp->status),
d6b0e80f
AC
3608 target_pid_to_str (lp->ptid));
3609
d90e17a7
PA
3610 /* We check for lp->waitstatus in addition to lp->status,
3611 because we can have pending process exits recorded in
3612 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3613 an additional lp->status_p flag. */
ca2163eb 3614 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3615 lp = NULL;
d6b0e80f
AC
3616 }
3617
25289eb2 3618 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
d6b0e80f
AC
3619 {
3620 /* A pending SIGSTOP may interfere with the normal stream of
3621 events. In a typical case where interference is a problem,
3622 we have a SIGSTOP signal pending for LWP A while
3623 single-stepping it, encounter an event in LWP B, and take the
3624 pending SIGSTOP while trying to stop LWP A. After processing
3625 the event in LWP B, LWP A is continued, and we'll never see
3626 the SIGTRAP associated with the last time we were
3627 single-stepping LWP A. */
3628
3629 /* Resume the thread. It should halt immediately returning the
3630 pending SIGSTOP. */
3631 registers_changed ();
7b50312a
PA
3632 if (linux_nat_prepare_to_resume != NULL)
3633 linux_nat_prepare_to_resume (lp);
28439f5e 3634 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3635 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3636 if (debug_linux_nat)
3637 fprintf_unfiltered (gdb_stdlog,
3638 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3639 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3640 target_pid_to_str (lp->ptid));
3641 lp->stopped = 0;
3642 gdb_assert (lp->resumed);
3643
ca2163eb
PA
3644 /* Catch the pending SIGSTOP. */
3645 status = lp->status;
3646 lp->status = 0;
3647
d6b0e80f 3648 stop_wait_callback (lp, NULL);
ca2163eb
PA
3649
3650 /* If the lp->status field isn't empty, we caught another signal
3651 while flushing the SIGSTOP. Return it back to the event
3652 queue of the LWP, as we already have an event to handle. */
3653 if (lp->status)
3654 {
3655 if (debug_linux_nat)
3656 fprintf_unfiltered (gdb_stdlog,
3657 "LLW: kill %s, %s\n",
3658 target_pid_to_str (lp->ptid),
3659 status_to_str (lp->status));
3660 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3661 }
3662
3663 lp->status = status;
d6b0e80f
AC
3664 }
3665
b84876c2
PA
3666 if (!target_can_async_p ())
3667 {
3668 /* Causes SIGINT to be passed on to the attached process. */
3669 set_sigint_trap ();
b84876c2 3670 }
d6b0e80f 3671
0e5bf2a8 3672 /* But if we don't find a pending event, we'll have to wait. */
7feb7d06 3673
d90e17a7 3674 while (lp == NULL)
d6b0e80f
AC
3675 {
3676 pid_t lwpid;
3677
0e5bf2a8
PA
3678 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3679 quirks:
3680
3681 - If the thread group leader exits while other threads in the
3682 thread group still exist, waitpid(TGID, ...) hangs. That
3683 waitpid won't return an exit status until the other threads
3684 in the group are reaped.
3685
3686 - When a non-leader thread execs, that thread just vanishes
3687 without reporting an exit (so we'd hang if we waited for it
3688 explicitly in that case). The exec event is reported to
3689 the TGID pid. */
3690
3691 errno = 0;
3692 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3693 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3694 lwpid = my_waitpid (-1, &status, WNOHANG);
3695
3696 if (debug_linux_nat)
3697 fprintf_unfiltered (gdb_stdlog,
3698 "LNW: waitpid(-1, ...) returned %d, %s\n",
3699 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3700
d6b0e80f
AC
3701 if (lwpid > 0)
3702 {
12d9289a
PA
3703 /* If this is true, then we paused LWPs momentarily, and may
3704 now have pending events to handle. */
3705 int new_pending;
3706
d6b0e80f
AC
3707 if (debug_linux_nat)
3708 {
3709 fprintf_unfiltered (gdb_stdlog,
3710 "LLW: waitpid %ld received %s\n",
3711 (long) lwpid, status_to_str (status));
3712 }
3713
0e5bf2a8 3714 lp = linux_nat_filter_event (lwpid, status, &new_pending);
d90e17a7 3715
33355866
JK
3716 /* STATUS is now no longer valid, use LP->STATUS instead. */
3717 status = 0;
3718
0e5bf2a8 3719 if (lp && !ptid_match (lp->ptid, ptid))
d6b0e80f 3720 {
e3e9f5a2
PA
3721 gdb_assert (lp->resumed);
3722
d90e17a7 3723 if (debug_linux_nat)
3e43a32a
MS
3724 fprintf (stderr,
3725 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3726 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3727
ca2163eb 3728 if (WIFSTOPPED (lp->status))
d90e17a7 3729 {
ca2163eb 3730 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3731 {
e3e9f5a2
PA
3732 /* Cancel breakpoint hits. The breakpoint may
3733 be removed before we fetch events from this
3734 process to report to the core. It is best
3735 not to assume the moribund breakpoints
3736 heuristic always handles these cases --- it
3737 could be too many events go through to the
3738 core before this one is handled. All-stop
3739 always cancels breakpoint hits in all
3740 threads. */
3741 if (non_stop
00390b84 3742 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3743 && cancel_breakpoint (lp))
3744 {
3745 /* Throw away the SIGTRAP. */
3746 lp->status = 0;
3747
3748 if (debug_linux_nat)
3749 fprintf (stderr,
3e43a32a
MS
3750 "LLW: LWP %ld hit a breakpoint while"
3751 " waiting for another process;"
3752 " cancelled it\n",
e3e9f5a2
PA
3753 ptid_get_lwp (lp->ptid));
3754 }
3755 lp->stopped = 1;
d90e17a7
PA
3756 }
3757 else
3758 {
3759 lp->stopped = 1;
3760 lp->signalled = 0;
3761 }
3762 }
33355866 3763 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3764 {
3765 if (debug_linux_nat)
3e43a32a
MS
3766 fprintf (stderr,
3767 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3768 ptid_get_lwp (lp->ptid));
3769
3770 /* This was the last lwp in the process. Since
3771 events are serialized to GDB core, and we can't
3772 report this one right now, but GDB core and the
3773 other target layers will want to be notified
3774 about the exit code/signal, leave the status
3775 pending for the next time we're able to report
3776 it. */
d90e17a7
PA
3777
3778 /* Prevent trying to stop this thread again. We'll
3779 never try to resume it because it has a pending
3780 status. */
3781 lp->stopped = 1;
3782
3783 /* Dead LWPs aren't expected to report a pending
3784 SIGSTOP. */
3785 lp->signalled = 0;
3786
3787 /* Store the pending event in the waitstatus as
3788 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3789 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3790 }
3791
3792 /* Keep looking. */
3793 lp = NULL;
d6b0e80f
AC
3794 }
3795
0e5bf2a8 3796 if (new_pending)
d90e17a7 3797 {
0e5bf2a8
PA
3798 /* Some LWP now has a pending event. Go all the way
3799 back to check it. */
3800 goto retry;
3801 }
12d9289a 3802
0e5bf2a8
PA
3803 if (lp)
3804 {
3805 /* We got an event to report to the core. */
3806 break;
d90e17a7 3807 }
0e5bf2a8
PA
3808
3809 /* Retry until nothing comes out of waitpid. A single
3810 SIGCHLD can indicate more than one child stopped. */
3811 continue;
d6b0e80f
AC
3812 }
3813
0e5bf2a8
PA
3814 /* Check for zombie thread group leaders. Those can't be reaped
3815 until all other threads in the thread group are. */
3816 check_zombie_leaders ();
d6b0e80f 3817
0e5bf2a8
PA
3818 /* If there are no resumed children left, bail. We'd be stuck
3819 forever in the sigsuspend call below otherwise. */
3820 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3821 {
3822 if (debug_linux_nat)
3823 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3824
0e5bf2a8 3825 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3826
0e5bf2a8
PA
3827 if (!target_can_async_p ())
3828 clear_sigint_trap ();
b84876c2 3829
0e5bf2a8
PA
3830 restore_child_signals_mask (&prev_mask);
3831 return minus_one_ptid;
d6b0e80f 3832 }
28736962 3833
0e5bf2a8
PA
3834 /* No interesting event to report to the core. */
3835
3836 if (target_options & TARGET_WNOHANG)
3837 {
01124a23 3838 if (debug_linux_nat)
28736962
PA
3839 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3840
0e5bf2a8 3841 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3842 restore_child_signals_mask (&prev_mask);
3843 return minus_one_ptid;
3844 }
d6b0e80f
AC
3845
3846 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3847 gdb_assert (lp == NULL);
0e5bf2a8
PA
3848
3849 /* Block until we get an event reported with SIGCHLD. */
3850 sigsuspend (&suspend_mask);
d6b0e80f
AC
3851 }
3852
b84876c2 3853 if (!target_can_async_p ())
d26b5354 3854 clear_sigint_trap ();
d6b0e80f
AC
3855
3856 gdb_assert (lp);
3857
ca2163eb
PA
3858 status = lp->status;
3859 lp->status = 0;
3860
d6b0e80f
AC
3861 /* Don't report signals that GDB isn't interested in, such as
3862 signals that are neither printed nor stopped upon. Stopping all
3863 threads can be a bit time-consuming so if we want decent
3864 performance with heavily multi-threaded programs, especially when
3865 they're using a high frequency timer, we'd better avoid it if we
3866 can. */
3867
3868 if (WIFSTOPPED (status))
3869 {
423ec54c 3870 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b0e80f 3871
2455069d
UW
3872 /* When using hardware single-step, we need to report every signal.
3873 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3874 if (!lp->step
2455069d 3875 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3876 {
3877 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3878 here? It is not clear we should. GDB may not expect
3879 other threads to run. On the other hand, not resuming
3880 newly attached threads may cause an unwanted delay in
3881 getting them running. */
3882 registers_changed ();
7b50312a
PA
3883 if (linux_nat_prepare_to_resume != NULL)
3884 linux_nat_prepare_to_resume (lp);
28439f5e 3885 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3886 lp->step, signo);
d6b0e80f
AC
3887 if (debug_linux_nat)
3888 fprintf_unfiltered (gdb_stdlog,
3889 "LLW: %s %s, %s (preempt 'handle')\n",
3890 lp->step ?
3891 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3892 target_pid_to_str (lp->ptid),
423ec54c
JK
3893 (signo != TARGET_SIGNAL_0
3894 ? strsignal (target_signal_to_host (signo))
3895 : "0"));
d6b0e80f 3896 lp->stopped = 0;
d6b0e80f
AC
3897 goto retry;
3898 }
3899
1ad15515 3900 if (!non_stop)
d6b0e80f 3901 {
1ad15515
PA
3902 /* Only do the below in all-stop, as we currently use SIGINT
3903 to implement target_stop (see linux_nat_stop) in
3904 non-stop. */
3905 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3906 {
3907 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3908 forwarded to the entire process group, that is, all LWPs
3909 will receive it - unless they're using CLONE_THREAD to
3910 share signals. Since we only want to report it once, we
3911 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3912 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3913 set_ignore_sigint, NULL);
1ad15515
PA
3914 lp->ignore_sigint = 0;
3915 }
3916 else
3917 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3918 }
3919 }
3920
3921 /* This LWP is stopped now. */
3922 lp->stopped = 1;
3923
3924 if (debug_linux_nat)
3925 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3926 status_to_str (status), target_pid_to_str (lp->ptid));
3927
4c28f408
PA
3928 if (!non_stop)
3929 {
3930 /* Now stop all other LWP's ... */
d90e17a7 3931 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3932
3933 /* ... and wait until all of them have reported back that
3934 they're no longer running. */
d90e17a7 3935 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3936
3937 /* If we're not waiting for a specific LWP, choose an event LWP
3938 from among those that have had events. Giving equal priority
3939 to all LWPs that have had events helps prevent
3940 starvation. */
0e5bf2a8 3941 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d90e17a7 3942 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3943
e3e9f5a2
PA
3944 /* Now that we've selected our final event LWP, cancel any
3945 breakpoints in other LWPs that have hit a GDB breakpoint.
3946 See the comment in cancel_breakpoints_callback to find out
3947 why. */
3948 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3949
4b60df3d
PA
3950 /* We'll need this to determine whether to report a SIGSTOP as
3951 TARGET_SIGNAL_0. Need to take a copy because
3952 resume_clear_callback clears it. */
3953 last_resume_kind = lp->last_resume_kind;
3954
e3e9f5a2
PA
3955 /* In all-stop, from the core's perspective, all LWPs are now
3956 stopped until a new resume action is sent over. */
3957 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3958 }
3959 else
25289eb2 3960 {
4b60df3d
PA
3961 /* See above. */
3962 last_resume_kind = lp->last_resume_kind;
3963 resume_clear_callback (lp, NULL);
25289eb2 3964 }
d6b0e80f 3965
26ab7092 3966 if (linux_nat_status_is_event (status))
d6b0e80f 3967 {
d6b0e80f
AC
3968 if (debug_linux_nat)
3969 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3970 "LLW: trap ptid is %s.\n",
3971 target_pid_to_str (lp->ptid));
d6b0e80f 3972 }
d6b0e80f
AC
3973
3974 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3975 {
3976 *ourstatus = lp->waitstatus;
3977 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3978 }
3979 else
3980 store_waitstatus (ourstatus, status);
3981
01124a23 3982 if (debug_linux_nat)
b84876c2
PA
3983 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3984
7feb7d06 3985 restore_child_signals_mask (&prev_mask);
1e225492 3986
4b60df3d 3987 if (last_resume_kind == resume_stop
25289eb2
PA
3988 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3989 && WSTOPSIG (status) == SIGSTOP)
3990 {
3991 /* A thread that has been requested to stop by GDB with
3992 target_stop, and it stopped cleanly, so report as SIG0. The
3993 use of SIGSTOP is an implementation detail. */
3994 ourstatus->value.sig = TARGET_SIGNAL_0;
3995 }
3996
1e225492
JK
3997 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3998 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3999 lp->core = -1;
4000 else
4001 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
4002
f973ed9c 4003 return lp->ptid;
d6b0e80f
AC
4004}
4005
e3e9f5a2
PA
4006/* Resume LWPs that are currently stopped without any pending status
4007 to report, but are resumed from the core's perspective. */
4008
4009static int
4010resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
4011{
4012 ptid_t *wait_ptid_p = data;
4013
4014 if (lp->stopped
4015 && lp->resumed
4016 && lp->status == 0
4017 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
4018 {
336060f3
PA
4019 struct regcache *regcache = get_thread_regcache (lp->ptid);
4020 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4021 CORE_ADDR pc = regcache_read_pc (regcache);
4022
e3e9f5a2
PA
4023 gdb_assert (is_executing (lp->ptid));
4024
4025 /* Don't bother if there's a breakpoint at PC that we'd hit
4026 immediately, and we're not waiting for this LWP. */
4027 if (!ptid_match (lp->ptid, *wait_ptid_p))
4028 {
e3e9f5a2
PA
4029 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
4030 return 0;
4031 }
4032
4033 if (debug_linux_nat)
4034 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
4035 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
4036 target_pid_to_str (lp->ptid),
4037 paddress (gdbarch, pc),
4038 lp->step);
e3e9f5a2 4039
336060f3 4040 registers_changed ();
7b50312a
PA
4041 if (linux_nat_prepare_to_resume != NULL)
4042 linux_nat_prepare_to_resume (lp);
e3e9f5a2
PA
4043 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
4044 lp->step, TARGET_SIGNAL_0);
4045 lp->stopped = 0;
4046 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
4047 lp->stopped_by_watchpoint = 0;
4048 }
4049
4050 return 0;
4051}
4052
7feb7d06
PA
4053static ptid_t
4054linux_nat_wait (struct target_ops *ops,
47608cb1
PA
4055 ptid_t ptid, struct target_waitstatus *ourstatus,
4056 int target_options)
7feb7d06
PA
4057{
4058 ptid_t event_ptid;
4059
4060 if (debug_linux_nat)
3e43a32a
MS
4061 fprintf_unfiltered (gdb_stdlog,
4062 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
4063
4064 /* Flush the async file first. */
4065 if (target_can_async_p ())
4066 async_file_flush ();
4067
e3e9f5a2
PA
4068 /* Resume LWPs that are currently stopped without any pending status
4069 to report, but are resumed from the core's perspective. LWPs get
4070 in this state if we find them stopping at a time we're not
4071 interested in reporting the event (target_wait on a
4072 specific_process, for example, see linux_nat_wait_1), and
4073 meanwhile the event became uninteresting. Don't bother resuming
4074 LWPs we're not going to wait for if they'd stop immediately. */
4075 if (non_stop)
4076 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4077
47608cb1 4078 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
4079
4080 /* If we requested any event, and something came out, assume there
4081 may be more. If we requested a specific lwp or process, also
4082 assume there may be more. */
4083 if (target_can_async_p ()
6953d224
PA
4084 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4085 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
4086 || !ptid_equal (ptid, minus_one_ptid)))
4087 async_file_mark ();
4088
4089 /* Get ready for the next event. */
4090 if (target_can_async_p ())
4091 target_async (inferior_event_handler, 0);
4092
4093 return event_ptid;
4094}
4095
d6b0e80f
AC
4096static int
4097kill_callback (struct lwp_info *lp, void *data)
4098{
ed731959
JK
4099 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4100
4101 errno = 0;
4102 kill (GET_LWP (lp->ptid), SIGKILL);
4103 if (debug_linux_nat)
4104 fprintf_unfiltered (gdb_stdlog,
4105 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4106 target_pid_to_str (lp->ptid),
4107 errno ? safe_strerror (errno) : "OK");
4108
4109 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4110
d6b0e80f
AC
4111 errno = 0;
4112 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4113 if (debug_linux_nat)
4114 fprintf_unfiltered (gdb_stdlog,
4115 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4116 target_pid_to_str (lp->ptid),
4117 errno ? safe_strerror (errno) : "OK");
4118
4119 return 0;
4120}
4121
4122static int
4123kill_wait_callback (struct lwp_info *lp, void *data)
4124{
4125 pid_t pid;
4126
4127 /* We must make sure that there are no pending events (delayed
4128 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4129 program doesn't interfere with any following debugging session. */
4130
4131 /* For cloned processes we must check both with __WCLONE and
4132 without, since the exit status of a cloned process isn't reported
4133 with __WCLONE. */
4134 if (lp->cloned)
4135 {
4136 do
4137 {
58aecb61 4138 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 4139 if (pid != (pid_t) -1)
d6b0e80f 4140 {
e85a822c
DJ
4141 if (debug_linux_nat)
4142 fprintf_unfiltered (gdb_stdlog,
4143 "KWC: wait %s received unknown.\n",
4144 target_pid_to_str (lp->ptid));
4145 /* The Linux kernel sometimes fails to kill a thread
4146 completely after PTRACE_KILL; the thread proceeds from the stop
4147 point in do_fork out to the one in
4148 get_signal_to_deliver and waits again. So kill it
4149 again. */
4150 kill_callback (lp, NULL);
d6b0e80f
AC
4151 }
4152 }
4153 while (pid == GET_LWP (lp->ptid));
4154
4155 gdb_assert (pid == -1 && errno == ECHILD);
4156 }
4157
4158 do
4159 {
58aecb61 4160 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 4161 if (pid != (pid_t) -1)
d6b0e80f 4162 {
e85a822c
DJ
4163 if (debug_linux_nat)
4164 fprintf_unfiltered (gdb_stdlog,
4165 "KWC: wait %s received unk.\n",
4166 target_pid_to_str (lp->ptid));
4167 /* See the call to kill_callback above. */
4168 kill_callback (lp, NULL);
d6b0e80f
AC
4169 }
4170 }
4171 while (pid == GET_LWP (lp->ptid));
4172
4173 gdb_assert (pid == -1 && errno == ECHILD);
4174 return 0;
4175}
4176
4177static void
7d85a9c0 4178linux_nat_kill (struct target_ops *ops)
d6b0e80f 4179{
f973ed9c
DJ
4180 struct target_waitstatus last;
4181 ptid_t last_ptid;
4182 int status;
d6b0e80f 4183
f973ed9c
DJ
4184 /* If we're stopped while forking and we haven't followed yet,
4185 kill the other task. We need to do this first because the
4186 parent will be sleeping if this is a vfork. */
d6b0e80f 4187
f973ed9c 4188 get_last_target_status (&last_ptid, &last);
d6b0e80f 4189
f973ed9c
DJ
4190 if (last.kind == TARGET_WAITKIND_FORKED
4191 || last.kind == TARGET_WAITKIND_VFORKED)
4192 {
3a3e9ee3 4193 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
4194 wait (&status);
4195 }
4196
4197 if (forks_exist_p ())
7feb7d06 4198 linux_fork_killall ();
f973ed9c
DJ
4199 else
4200 {
d90e17a7 4201 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 4202
4c28f408
PA
4203 /* Stop all threads before killing them, since ptrace requires
4204 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 4205 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
4206 /* ... and wait until all of them have reported back that
4207 they're no longer running. */
d90e17a7 4208 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 4209
f973ed9c 4210 /* Kill all LWP's ... */
d90e17a7 4211 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
4212
4213 /* ... and wait until we've flushed all events. */
d90e17a7 4214 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
4215 }
4216
4217 target_mourn_inferior ();
d6b0e80f
AC
4218}
4219
4220static void
136d6dae 4221linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 4222{
d90e17a7 4223 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 4224
f973ed9c 4225 if (! forks_exist_p ())
d90e17a7
PA
4226 /* Normal case, no other forks available. */
4227 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
4228 else
4229 /* Multi-fork case. The current inferior_ptid has exited, but
4230 there are other viable forks to debug. Delete the exiting
4231 one and context-switch to the first available. */
4232 linux_fork_mourn_inferior ();
d6b0e80f
AC
4233}
4234
5b009018
PA
4235/* Convert a native/host siginfo object, into/from the siginfo in the
4236 layout of the inferiors' architecture. */
4237
4238static void
4239siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4240{
4241 int done = 0;
4242
4243 if (linux_nat_siginfo_fixup != NULL)
4244 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4245
4246 /* If there was no callback, or the callback didn't do anything,
4247 then just do a straight memcpy. */
4248 if (!done)
4249 {
4250 if (direction == 1)
4251 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4252 else
4253 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4254 }
4255}
4256
4aa995e1
PA
4257static LONGEST
4258linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4259 const char *annex, gdb_byte *readbuf,
4260 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4261{
4aa995e1
PA
4262 int pid;
4263 struct siginfo siginfo;
5b009018 4264 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
4265
4266 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4267 gdb_assert (readbuf || writebuf);
4268
4269 pid = GET_LWP (inferior_ptid);
4270 if (pid == 0)
4271 pid = GET_PID (inferior_ptid);
4272
4273 if (offset > sizeof (siginfo))
4274 return -1;
4275
4276 errno = 0;
4277 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4278 if (errno != 0)
4279 return -1;
4280
5b009018
PA
4281 /* When GDB is built as a 64-bit application, ptrace writes into
4282 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4283 inferior with a 64-bit GDB should look the same as debugging it
4284 with a 32-bit GDB, we need to convert it. GDB core always sees
4285 the converted layout, so any read/write will have to be done
4286 post-conversion. */
4287 siginfo_fixup (&siginfo, inf_siginfo, 0);
4288
4aa995e1
PA
4289 if (offset + len > sizeof (siginfo))
4290 len = sizeof (siginfo) - offset;
4291
4292 if (readbuf != NULL)
5b009018 4293 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4294 else
4295 {
5b009018
PA
4296 memcpy (inf_siginfo + offset, writebuf, len);
4297
4298 /* Convert back to ptrace layout before flushing it out. */
4299 siginfo_fixup (&siginfo, inf_siginfo, 1);
4300
4aa995e1
PA
4301 errno = 0;
4302 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4303 if (errno != 0)
4304 return -1;
4305 }
4306
4307 return len;
4308}
4309
10d6c8cd
DJ
4310static LONGEST
4311linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4312 const char *annex, gdb_byte *readbuf,
4313 const gdb_byte *writebuf,
4314 ULONGEST offset, LONGEST len)
d6b0e80f 4315{
4aa995e1 4316 struct cleanup *old_chain;
10d6c8cd 4317 LONGEST xfer;
d6b0e80f 4318
4aa995e1
PA
4319 if (object == TARGET_OBJECT_SIGNAL_INFO)
4320 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4321 offset, len);
4322
c35b1492
PA
4323 /* The target is connected but no live inferior is selected. Pass
4324 this request down to a lower stratum (e.g., the executable
4325 file). */
4326 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4327 return 0;
4328
4aa995e1
PA
4329 old_chain = save_inferior_ptid ();
4330
d6b0e80f
AC
4331 if (is_lwp (inferior_ptid))
4332 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4333
10d6c8cd
DJ
4334 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4335 offset, len);
d6b0e80f
AC
4336
4337 do_cleanups (old_chain);
4338 return xfer;
4339}
4340
4341static int
28439f5e 4342linux_thread_alive (ptid_t ptid)
d6b0e80f 4343{
8c6a60d1 4344 int err, tmp_errno;
4c28f408 4345
d6b0e80f
AC
4346 gdb_assert (is_lwp (ptid));
4347
4c28f408
PA
4348 /* Send signal 0 instead of anything ptrace, because ptracing a
4349 running thread errors out claiming that the thread doesn't
4350 exist. */
4351 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4352 tmp_errno = errno;
d6b0e80f
AC
4353 if (debug_linux_nat)
4354 fprintf_unfiltered (gdb_stdlog,
4c28f408 4355 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4356 target_pid_to_str (ptid),
8c6a60d1 4357 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4358
4c28f408 4359 if (err != 0)
d6b0e80f
AC
4360 return 0;
4361
4362 return 1;
4363}
4364
28439f5e
PA
4365static int
4366linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4367{
4368 return linux_thread_alive (ptid);
4369}
4370
d6b0e80f 4371static char *
117de6a9 4372linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4373{
4374 static char buf[64];
4375
a0ef4274 4376 if (is_lwp (ptid)
d90e17a7
PA
4377 && (GET_PID (ptid) != GET_LWP (ptid)
4378 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4379 {
4380 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4381 return buf;
4382 }
4383
4384 return normal_pid_to_str (ptid);
4385}
4386
4694da01
TT
4387static char *
4388linux_nat_thread_name (struct thread_info *thr)
4389{
4390 int pid = ptid_get_pid (thr->ptid);
4391 long lwp = ptid_get_lwp (thr->ptid);
4392#define FORMAT "/proc/%d/task/%ld/comm"
4393 char buf[sizeof (FORMAT) + 30];
4394 FILE *comm_file;
4395 char *result = NULL;
4396
4397 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4398 comm_file = fopen (buf, "r");
4399 if (comm_file)
4400 {
4401 /* Not exported by the kernel, so we define it here. */
4402#define COMM_LEN 16
4403 static char line[COMM_LEN + 1];
4404
4405 if (fgets (line, sizeof (line), comm_file))
4406 {
4407 char *nl = strchr (line, '\n');
4408
4409 if (nl)
4410 *nl = '\0';
4411 if (*line != '\0')
4412 result = line;
4413 }
4414
4415 fclose (comm_file);
4416 }
4417
4418#undef COMM_LEN
4419#undef FORMAT
4420
4421 return result;
4422}
4423
dba24537
AC
4424/* Accepts an integer PID; Returns a string representing a file that
4425 can be opened to get the symbols for the child process. */
4426
6d8fd2b7
UW
4427static char *
4428linux_child_pid_to_exec_file (int pid)
dba24537
AC
4429{
4430 char *name1, *name2;
4431
4432 name1 = xmalloc (MAXPATHLEN);
4433 name2 = xmalloc (MAXPATHLEN);
4434 make_cleanup (xfree, name1);
4435 make_cleanup (xfree, name2);
4436 memset (name2, 0, MAXPATHLEN);
4437
4438 sprintf (name1, "/proc/%d/exe", pid);
4439 if (readlink (name1, name2, MAXPATHLEN) > 0)
4440 return name2;
4441 else
4442 return name1;
4443}
4444
dba24537
AC
4445/* Records the thread's register state for the corefile note
4446 section. */
4447
4448static char *
6432734d
UW
4449linux_nat_collect_thread_registers (const struct regcache *regcache,
4450 ptid_t ptid, bfd *obfd,
4451 char *note_data, int *note_size,
4452 enum target_signal stop_signal)
dba24537 4453{
6432734d 4454 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 4455 const struct regset *regset;
55e969c1 4456 int core_regset_p;
6432734d
UW
4457 gdb_gregset_t gregs;
4458 gdb_fpregset_t fpregs;
4f844a66
DM
4459
4460 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
dba24537 4461
6432734d
UW
4462 if (core_regset_p
4463 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4464 sizeof (gregs)))
4465 != NULL && regset->collect_regset != NULL)
4466 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4f844a66 4467 else
6432734d 4468 fill_gregset (regcache, &gregs, -1);
2f2241f1 4469
6432734d
UW
4470 note_data = (char *) elfcore_write_prstatus
4471 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4472 target_signal_to_host (stop_signal), &gregs);
2f2241f1 4473
6432734d
UW
4474 if (core_regset_p
4475 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4476 sizeof (fpregs)))
3e43a32a 4477 != NULL && regset->collect_regset != NULL)
6432734d
UW
4478 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4479 else
4480 fill_fpregset (regcache, &fpregs, -1);
17ea7499 4481
6432734d
UW
4482 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4483 &fpregs, sizeof (fpregs));
4f844a66 4484
dba24537
AC
4485 return note_data;
4486}
4487
dba24537
AC
4488/* Fills the "to_make_corefile_note" target vector. Builds the note
4489 section for a corefile, and returns it in a malloc buffer. */
4490
4491static char *
4492linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4493{
6432734d
UW
4494 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4495 converted to gdbarch_core_regset_sections, this function can go away. */
4496 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4497 linux_nat_collect_thread_registers);
dba24537
AC
4498}
4499
10d6c8cd
DJ
4500/* Implement the to_xfer_partial interface for memory reads using the /proc
4501 filesystem. Because we can use a single read() call for /proc, this
4502 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4503 but it doesn't support writes. */
4504
4505static LONGEST
4506linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4507 const char *annex, gdb_byte *readbuf,
4508 const gdb_byte *writebuf,
4509 ULONGEST offset, LONGEST len)
dba24537 4510{
10d6c8cd
DJ
4511 LONGEST ret;
4512 int fd;
dba24537
AC
4513 char filename[64];
4514
10d6c8cd 4515 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4516 return 0;
4517
4518 /* Don't bother for one word. */
4519 if (len < 3 * sizeof (long))
4520 return 0;
4521
4522 /* We could keep this file open and cache it - possibly one per
4523 thread. That requires some juggling, but is even faster. */
4524 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4525 fd = open (filename, O_RDONLY | O_LARGEFILE);
4526 if (fd == -1)
4527 return 0;
4528
4529 /* If pread64 is available, use it. It's faster if the kernel
4530 supports it (only one syscall), and it's 64-bit safe even on
4531 32-bit platforms (for instance, SPARC debugging a SPARC64
4532 application). */
4533#ifdef HAVE_PREAD64
10d6c8cd 4534 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4535#else
10d6c8cd 4536 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4537#endif
4538 ret = 0;
4539 else
4540 ret = len;
4541
4542 close (fd);
4543 return ret;
4544}
4545
efcbbd14
UW
4546
4547/* Enumerate spufs IDs for process PID. */
4548static LONGEST
4549spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4550{
4551 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4552 LONGEST pos = 0;
4553 LONGEST written = 0;
4554 char path[128];
4555 DIR *dir;
4556 struct dirent *entry;
4557
4558 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4559 dir = opendir (path);
4560 if (!dir)
4561 return -1;
4562
4563 rewinddir (dir);
4564 while ((entry = readdir (dir)) != NULL)
4565 {
4566 struct stat st;
4567 struct statfs stfs;
4568 int fd;
4569
4570 fd = atoi (entry->d_name);
4571 if (!fd)
4572 continue;
4573
4574 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4575 if (stat (path, &st) != 0)
4576 continue;
4577 if (!S_ISDIR (st.st_mode))
4578 continue;
4579
4580 if (statfs (path, &stfs) != 0)
4581 continue;
4582 if (stfs.f_type != SPUFS_MAGIC)
4583 continue;
4584
4585 if (pos >= offset && pos + 4 <= offset + len)
4586 {
4587 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4588 written += 4;
4589 }
4590 pos += 4;
4591 }
4592
4593 closedir (dir);
4594 return written;
4595}
4596
4597/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4598 object type, using the /proc file system. */
4599static LONGEST
4600linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4601 const char *annex, gdb_byte *readbuf,
4602 const gdb_byte *writebuf,
4603 ULONGEST offset, LONGEST len)
4604{
4605 char buf[128];
4606 int fd = 0;
4607 int ret = -1;
4608 int pid = PIDGET (inferior_ptid);
4609
4610 if (!annex)
4611 {
4612 if (!readbuf)
4613 return -1;
4614 else
4615 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4616 }
4617
4618 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4619 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4620 if (fd <= 0)
4621 return -1;
4622
4623 if (offset != 0
4624 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4625 {
4626 close (fd);
4627 return 0;
4628 }
4629
4630 if (writebuf)
4631 ret = write (fd, writebuf, (size_t) len);
4632 else if (readbuf)
4633 ret = read (fd, readbuf, (size_t) len);
4634
4635 close (fd);
4636 return ret;
4637}
4638
4639
dba24537
AC
4640/* Parse LINE as a signal set and add its set bits to SIGS. */
4641
4642static void
4643add_line_to_sigset (const char *line, sigset_t *sigs)
4644{
4645 int len = strlen (line) - 1;
4646 const char *p;
4647 int signum;
4648
4649 if (line[len] != '\n')
8a3fe4f8 4650 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4651
4652 p = line;
4653 signum = len * 4;
4654 while (len-- > 0)
4655 {
4656 int digit;
4657
4658 if (*p >= '0' && *p <= '9')
4659 digit = *p - '0';
4660 else if (*p >= 'a' && *p <= 'f')
4661 digit = *p - 'a' + 10;
4662 else
8a3fe4f8 4663 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4664
4665 signum -= 4;
4666
4667 if (digit & 1)
4668 sigaddset (sigs, signum + 1);
4669 if (digit & 2)
4670 sigaddset (sigs, signum + 2);
4671 if (digit & 4)
4672 sigaddset (sigs, signum + 3);
4673 if (digit & 8)
4674 sigaddset (sigs, signum + 4);
4675
4676 p++;
4677 }
4678}
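
/* Hedged usage illustration, not part of the original file: the hex
   digits are consumed left to right, most significant nibble first,
   and bit N of the mask corresponds to signal N + 1.  For instance,
   the line "0000000000000102\n" (as found after a "SigPnd:\t" prefix)
   marks SIGINT (bit 1) and SIGKILL (bit 8) as pending.  */

static void
example_add_line_to_sigset (void)
{
  sigset_t pending;

  sigemptyset (&pending);
  add_line_to_sigset ("0000000000000102\n", &pending);

  gdb_assert (sigismember (&pending, SIGINT));   /* Signal 2.  */
  gdb_assert (sigismember (&pending, SIGKILL));  /* Signal 9.  */
}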
4679
4680/* Find process PID's pending signals from /proc/pid/status and set
4681 SIGS to match. */
4682
4683void
3e43a32a
MS
4684linux_proc_pending_signals (int pid, sigset_t *pending,
4685 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4686{
4687 FILE *procfile;
4688 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 4689 struct cleanup *cleanup;
dba24537
AC
4690
4691 sigemptyset (pending);
4692 sigemptyset (blocked);
4693 sigemptyset (ignored);
4694 sprintf (fname, "/proc/%d/status", pid);
4695 procfile = fopen (fname, "r");
4696 if (procfile == NULL)
8a3fe4f8 4697 error (_("Could not open %s"), fname);
7c8a8b04 4698 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4699
4700 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4701 {
4702 /* Normal queued signals are on the SigPnd line in the status
4703 file. However, 2.6 kernels also have a "shared" pending
4704 queue for delivering signals to a thread group, so check for
4705 a ShdPnd line also.
4706
4707 Unfortunately some Red Hat kernels include the shared pending
4708 queue but not the ShdPnd status field. */
4709
4710 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4711 add_line_to_sigset (buffer + 8, pending);
4712 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4713 add_line_to_sigset (buffer + 8, pending);
4714 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4715 add_line_to_sigset (buffer + 8, blocked);
4716 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4717 add_line_to_sigset (buffer + 8, ignored);
4718 }
4719
7c8a8b04 4720 do_cleanups (cleanup);
dba24537
AC
4721}
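
/* Hedged usage sketch, not part of the original file: a caller that
   only wants to know whether SIGINT is queued for PID could use the
   function above like this.  */

static int
example_sigint_pending_p (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  return sigismember (&pending, SIGINT);
}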
4722
07e059b5
VP
4723static LONGEST
4724linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
4725 const char *annex, gdb_byte *readbuf,
4726 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5 4727{
07e059b5
VP
4728 gdb_assert (object == TARGET_OBJECT_OSDATA);
4729
d26e3629 4730 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4731}
4732
10d6c8cd
DJ
4733static LONGEST
4734linux_xfer_partial (struct target_ops *ops, enum target_object object,
4735 const char *annex, gdb_byte *readbuf,
4736 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4737{
4738 LONGEST xfer;
4739
4740 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4741 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
4742 offset, len);
4743
07e059b5
VP
4744 if (object == TARGET_OBJECT_OSDATA)
4745 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4746 offset, len);
4747
efcbbd14
UW
4748 if (object == TARGET_OBJECT_SPU)
4749 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4750 offset, len);
4751
8f313923
JK
4752 /* GDB calculates all the addresses in the possibly larger width of the address.
4753 Address width needs to be masked before its final use - either by
4754 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4755
4756 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4757
4758 if (object == TARGET_OBJECT_MEMORY)
4759 {
4760 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4761
4762 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4763 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4764 }
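  /* Illustrative note, not in the original: with a 32-bit inferior
     (addr_bit == 32), a sign-extended address such as
     0xffffffff08048000 is masked down to 0x08048000 here before being
     handed to the lower layers.  */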
4765
10d6c8cd
DJ
4766 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4767 offset, len);
4768 if (xfer != 0)
4769 return xfer;
4770
4771 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4772 offset, len);
4773}
4774
e9efe249 4775/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4776 it with local methods. */
4777
910122bf
UW
4778static void
4779linux_target_install_ops (struct target_ops *t)
10d6c8cd 4780{
6d8fd2b7 4781 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4782 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4783 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4784 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4785 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4786 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4787 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4788 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4789 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4790 t->to_post_attach = linux_child_post_attach;
4791 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4792 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4793
4794 super_xfer_partial = t->to_xfer_partial;
4795 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
4796}
4797
4798struct target_ops *
4799linux_target (void)
4800{
4801 struct target_ops *t;
4802
4803 t = inf_ptrace_target ();
4804 linux_target_install_ops (t);
4805
4806 return t;
4807}
4808
4809struct target_ops *
7714d83a 4810linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4811{
4812 struct target_ops *t;
4813
4814 t = inf_ptrace_trad_target (register_u_offset);
4815 linux_target_install_ops (t);
10d6c8cd 4816
10d6c8cd
DJ
4817 return t;
4818}
4819
b84876c2
PA
4820/* target_is_async_p implementation. */
4821
4822static int
4823linux_nat_is_async_p (void)
4824{
4825 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4826 it explicitly with the "set target-async" command.
b84876c2 4827 Someday, linux will always be async. */
3dd5b83d 4828 return target_async_permitted;
b84876c2
PA
4829}
4830
4831/* target_can_async_p implementation. */
4832
4833static int
4834linux_nat_can_async_p (void)
4835{
4836 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4837 it explicitly with the "set target-async" command.
b84876c2 4838 Someday, linux will always be async. */
3dd5b83d 4839 return target_async_permitted;
b84876c2
PA
4840}
4841
9908b566
VP
4842static int
4843linux_nat_supports_non_stop (void)
4844{
4845 return 1;
4846}
4847
d90e17a7
PA
4848/* True if we want to support multi-process. To be removed when GDB
4849 supports multi-exec. */
4850
2277426b 4851int linux_multi_process = 1;
d90e17a7
PA
4852
4853static int
4854linux_nat_supports_multi_process (void)
4855{
4856 return linux_multi_process;
4857}
4858
03583c20
UW
4859static int
4860linux_nat_supports_disable_randomization (void)
4861{
4862#ifdef HAVE_PERSONALITY
4863 return 1;
4864#else
4865 return 0;
4866#endif
4867}
4868
b84876c2
PA
4869static int async_terminal_is_ours = 1;
4870
4871/* target_terminal_inferior implementation. */
4872
4873static void
4874linux_nat_terminal_inferior (void)
4875{
4876 if (!target_is_async_p ())
4877 {
4878 /* Async mode is disabled. */
4879 terminal_inferior ();
4880 return;
4881 }
4882
b84876c2
PA
4883 terminal_inferior ();
4884
d9d2d8b6 4885 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4886 if (!async_terminal_is_ours)
4887 return;
4888
4889 delete_file_handler (input_fd);
4890 async_terminal_is_ours = 0;
4891 set_sigint_trap ();
4892}
4893
4894/* target_terminal_ours implementation. */
4895
2c0b251b 4896static void
b84876c2
PA
4897linux_nat_terminal_ours (void)
4898{
4899 if (!target_is_async_p ())
4900 {
4901 /* Async mode is disabled. */
4902 terminal_ours ();
4903 return;
4904 }
4905
4906 /* GDB should never give the terminal to the inferior if the
4907 inferior is running in the background (run&, continue&, etc.),
4908 but claiming it sure should. */
4909 terminal_ours ();
4910
b84876c2
PA
4911 if (async_terminal_is_ours)
4912 return;
4913
4914 clear_sigint_trap ();
4915 add_file_handler (input_fd, stdin_event_handler, 0);
4916 async_terminal_is_ours = 1;
4917}
4918
4919static void (*async_client_callback) (enum inferior_event_type event_type,
4920 void *context);
4921static void *async_client_context;
4922
7feb7d06
PA
4923/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4924 so we notice when any child changes state, and notify the
4925 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4926 above to wait for the arrival of a SIGCHLD. */
4927
b84876c2 4928static void
7feb7d06 4929sigchld_handler (int signo)
b84876c2 4930{
7feb7d06
PA
4931 int old_errno = errno;
4932
01124a23
DE
4933 if (debug_linux_nat)
4934 ui_file_write_async_safe (gdb_stdlog,
4935 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4936
4937 if (signo == SIGCHLD
4938 && linux_nat_event_pipe[0] != -1)
4939 async_file_mark (); /* Let the event loop know that there are
4940 events to handle. */
4941
4942 errno = old_errno;
4943}
4944
4945/* Callback registered with the target events file descriptor. */
4946
4947static void
4948handle_target_event (int error, gdb_client_data client_data)
4949{
4950 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4951}
4952
4953/* Create/destroy the target events pipe. Returns previous state. */
4954
4955static int
4956linux_async_pipe (int enable)
4957{
4958 int previous = (linux_nat_event_pipe[0] != -1);
4959
4960 if (previous != enable)
4961 {
4962 sigset_t prev_mask;
4963
4964 block_child_signals (&prev_mask);
4965
4966 if (enable)
4967 {
4968 if (pipe (linux_nat_event_pipe) == -1)
4969 internal_error (__FILE__, __LINE__,
4970 "creating event pipe failed.");
4971
4972 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4973 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4974 }
4975 else
4976 {
4977 close (linux_nat_event_pipe[0]);
4978 close (linux_nat_event_pipe[1]);
4979 linux_nat_event_pipe[0] = -1;
4980 linux_nat_event_pipe[1] = -1;
4981 }
4982
4983 restore_child_signals_mask (&prev_mask);
4984 }
4985
4986 return previous;
b84876c2
PA
4987}
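
/* Hedged sketch, not part of the original file: async_file_mark and
   async_file_flush (defined earlier in this file) implement the
   classic self-pipe trick over linux_nat_event_pipe.  Marking the
   pipe conceptually amounts to the following; the pipe is O_NONBLOCK,
   so a full pipe simply means the event loop was already woken.  */

static void
example_async_file_mark (void)
{
  int ret;

  do
    ret = write (linux_nat_event_pipe[1], "+", 1);
  while (ret == -1 && errno == EINTR);
}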
4988
4989/* target_async implementation. */
4990
4991static void
4992linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4993 void *context), void *context)
4994{
b84876c2
PA
4995 if (callback != NULL)
4996 {
4997 async_client_callback = callback;
4998 async_client_context = context;
7feb7d06
PA
4999 if (!linux_async_pipe (1))
5000 {
5001 add_file_handler (linux_nat_event_pipe[0],
5002 handle_target_event, NULL);
5003 /* There may be pending events to handle. Tell the event loop
5004 to poll them. */
5005 async_file_mark ();
5006 }
b84876c2
PA
5007 }
5008 else
5009 {
5010 async_client_callback = callback;
5011 async_client_context = context;
b84876c2 5012 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5013 linux_async_pipe (0);
b84876c2
PA
5014 }
5015 return;
5016}
5017
252fbfc8
PA
5018/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5019 event came out. */
5020
4c28f408 5021static int
252fbfc8 5022linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5023{
d90e17a7 5024 if (!lwp->stopped)
252fbfc8 5025 {
d90e17a7 5026 ptid_t ptid = lwp->ptid;
252fbfc8 5027
d90e17a7
PA
5028 if (debug_linux_nat)
5029 fprintf_unfiltered (gdb_stdlog,
5030 "LNSL: running -> suspending %s\n",
5031 target_pid_to_str (lwp->ptid));
252fbfc8 5032
252fbfc8 5033
25289eb2
PA
5034 if (lwp->last_resume_kind == resume_stop)
5035 {
5036 if (debug_linux_nat)
5037 fprintf_unfiltered (gdb_stdlog,
5038 "linux-nat: already stopping LWP %ld at "
5039 "GDB's request\n",
5040 ptid_get_lwp (lwp->ptid));
5041 return 0;
5042 }
252fbfc8 5043
25289eb2
PA
5044 stop_callback (lwp, NULL);
5045 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
5046 }
5047 else
5048 {
5049 /* Already known to be stopped; do nothing. */
252fbfc8 5050
d90e17a7
PA
5051 if (debug_linux_nat)
5052 {
e09875d4 5053 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5054 fprintf_unfiltered (gdb_stdlog,
5055 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5056 target_pid_to_str (lwp->ptid));
5057 else
3e43a32a
MS
5058 fprintf_unfiltered (gdb_stdlog,
5059 "LNSL: already stopped/no "
5060 "stop_requested yet %s\n",
d90e17a7 5061 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5062 }
5063 }
4c28f408
PA
5064 return 0;
5065}
5066
5067static void
5068linux_nat_stop (ptid_t ptid)
5069{
5070 if (non_stop)
d90e17a7 5071 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5072 else
5073 linux_ops->to_stop (ptid);
5074}
5075
d90e17a7
PA
5076static void
5077linux_nat_close (int quitting)
5078{
5079 /* Unregister from the event loop. */
305436e0
PA
5080 if (linux_nat_is_async_p ())
5081 linux_nat_async (NULL, 0);
d90e17a7 5082
d90e17a7
PA
5083 if (linux_ops->to_close)
5084 linux_ops->to_close (quitting);
5085}
5086
c0694254
PA
5087/* When requests are passed down from the linux-nat layer to the
5088 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5089 used. The address space pointer is stored in the inferior object,
5090 but the common code that is passed such ptid can't tell whether
5091 lwpid is a "main" process id or not (it assumes so). We reverse
5092 look up the "main" process id from the lwp here. */
5093
5094struct address_space *
5095linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5096{
5097 struct lwp_info *lwp;
5098 struct inferior *inf;
5099 int pid;
5100
5101 pid = GET_LWP (ptid);
5102 if (GET_LWP (ptid) == 0)
5103 {
5104 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5105 tgid. */
5106 lwp = find_lwp_pid (ptid);
5107 pid = GET_PID (lwp->ptid);
5108 }
5109 else
5110 {
5111 /* A (pid,lwpid,0) ptid. */
5112 pid = GET_PID (ptid);
5113 }
5114
5115 inf = find_inferior_pid (pid);
5116 gdb_assert (inf != NULL);
5117 return inf->aspace;
5118}
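
/* Editor's note (illustrative): the two ptid shapes handled above can
   be pictured with concrete numbers.  Assuming LWP 1234 belongs to
   process 1000:

     pid_to_ptid (1234)          an (lwpid,0,0) ptid from inf-ptrace;
                                 GET_LWP is 0, so the lwp list is
                                 consulted to recover pid 1000.
     ptid_build (1000, 1234, 0)  a (pid,lwpid,0) ptid from this layer;
                                 GET_PID already yields 1000.  */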
5119
dc146f7c
VP
5120int
5121linux_nat_core_of_thread_1 (ptid_t ptid)
5122{
5123 struct cleanup *back_to;
5124 char *filename;
5125 FILE *f;
5126 char *content = NULL;
5127 char *p;
5128 char *ts = 0;
5129 int content_read = 0;
5130 int i;
5131 int core;
5132
5133 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5134 GET_PID (ptid), GET_LWP (ptid));
5135 back_to = make_cleanup (xfree, filename);
5136
5137 f = fopen (filename, "r");
5138 if (!f)
5139 {
5140 do_cleanups (back_to);
5141 return -1;
5142 }
5143
5144 make_cleanup_fclose (f);
5145
5146 for (;;)
5147 {
5148 int n;
e0881a8e 5149
dc146f7c
VP
5150 content = xrealloc (content, content_read + 1024);
5151 n = fread (content + content_read, 1, 1024, f);
5152 content_read += n;
5153 if (n < 1024)
5154 {
5155 content[content_read] = '\0';
5156 break;
5157 }
5158 }
5159
5160 make_cleanup (xfree, content);
5161
5162 p = strchr (content, '(');
ca2a87a0
JK
5163
5164 /* Skip ")". */
5165 if (p != NULL)
5166 p = strchr (p, ')');
5167 if (p != NULL)
5168 p++;
dc146f7c
VP
5169
5170 /* If the first field after program name has index 0, then core number is
5171 the field with index 36. There's no constant for that anywhere. */
ca2a87a0
JK
5172 if (p != NULL)
5173 p = strtok_r (p, " ", &ts);
5174 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5175 p = strtok_r (NULL, " ", &ts);
5176
ca2a87a0 5177 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5178 core = -1;
5179
5180 do_cleanups (back_to);
5181
5182 return core;
5183}
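
/* Editor's worked example (not part of the original file): a line
   from /proc/<pid>/task/<lwp>/stat begins

     1234 (bash) S 1000 1234 ...

   The code above first skips everything up to and including the ")"
   that closes the command name, since the name itself may contain
   spaces.  Tokenizing from there, the state field ("S") gets index 0,
   so index 36 corresponds to the 39th space-separated field of the
   whole line, documented in proc(5) as "processor": the CPU the
   thread last executed on.  */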
5184
5185/* Return the cached value of the processor core for thread PTID. */
5186
5187int
5188linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5189{
5190 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5191
dc146f7c
VP
5192 if (info)
5193 return info->core;
5194 return -1;
5195}
5196
f973ed9c
DJ
5197void
5198linux_nat_add_target (struct target_ops *t)
5199{
f973ed9c
DJ
5200 /* Save the provided single-threaded target. We save this in a separate
5201 variable because another target we've inherited from (e.g. inf-ptrace)
5202 may have saved a pointer to T; we want to use it for the final
5203 process stratum target. */
5204 linux_ops_saved = *t;
5205 linux_ops = &linux_ops_saved;
5206
5207 /* Override some methods for multithreading. */
b84876c2 5208 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5209 t->to_attach = linux_nat_attach;
5210 t->to_detach = linux_nat_detach;
5211 t->to_resume = linux_nat_resume;
5212 t->to_wait = linux_nat_wait;
2455069d 5213 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
5214 t->to_xfer_partial = linux_nat_xfer_partial;
5215 t->to_kill = linux_nat_kill;
5216 t->to_mourn_inferior = linux_nat_mourn_inferior;
5217 t->to_thread_alive = linux_nat_thread_alive;
5218 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 5219 t->to_thread_name = linux_nat_thread_name;
f973ed9c 5220 t->to_has_thread_control = tc_schedlock;
c0694254 5221 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5222 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5223 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5224
b84876c2
PA
5225 t->to_can_async_p = linux_nat_can_async_p;
5226 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5227 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 5228 t->to_async = linux_nat_async;
b84876c2
PA
5229 t->to_terminal_inferior = linux_nat_terminal_inferior;
5230 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5231 t->to_close = linux_nat_close;
b84876c2 5232
4c28f408
PA
5233 /* Methods for non-stop support. */
5234 t->to_stop = linux_nat_stop;
5235
d90e17a7
PA
5236 t->to_supports_multi_process = linux_nat_supports_multi_process;
5237
03583c20
UW
5238 t->to_supports_disable_randomization
5239 = linux_nat_supports_disable_randomization;
5240
dc146f7c
VP
5241 t->to_core_of_thread = linux_nat_core_of_thread;
5242
f973ed9c
DJ
5243 /* We don't change the stratum; this target will sit at
5245 process_stratum and thread_db will sit at thread_stratum. This
5245 is a little strange, since this is a multi-threaded-capable
5246 target, but we want to be on the stack below thread_db, and we
5247 also want to be used for single-threaded processes. */
5248
5249 add_target (t);
f973ed9c
DJ
5250}
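
/* Editor's sketch (illustrative, not part of the original file): a
   native architecture file typically builds on linux_nat_add_target
   from its _initialize routine.  Everything named "foo" below is a
   hypothetical placeholder; linux_target is assumed to be the helper
   defined elsewhere in this file that layers Linux ops over
   inf-ptrace.  */

#if 0	/* Editor's sketch only.  */
void
_initialize_foo_linux_nat (void)
{
  struct target_ops *t;

  /* Start from the ptrace-based target with Linux extensions.  */
  t = linux_target ();

  /* Override the architecture-specific register access methods.  */
  t->to_fetch_registers = foo_linux_fetch_inferior_registers;
  t->to_store_registers = foo_linux_store_inferior_registers;

  /* Register it, then hook per-LWP creation.  */
  linux_nat_add_target (t);
  linux_nat_set_new_thread (t, foo_linux_new_thread);
}
#endif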
5251
9f0bdab8
DJ
5252/* Register a method to call whenever a new thread is attached. */
5253void
7b50312a
PA
5254linux_nat_set_new_thread (struct target_ops *t,
5255 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
5256{
5257 /* Save the pointer. We only support a single registered instance
5258 of the GNU/Linux native target, so we do not need to map this to
5259 T. */
5260 linux_nat_new_thread = new_thread;
5261}
5262
5b009018
PA
5263/* Register a method that converts a siginfo object between the layout
5264 that ptrace returns, and the layout in the architecture of the
5265 inferior. */
5266void
5267linux_nat_set_siginfo_fixup (struct target_ops *t,
5268 int (*siginfo_fixup) (struct siginfo *,
5269 gdb_byte *,
5270 int))
5271{
5272 /* Save the pointer. */
5273 linux_nat_siginfo_fixup = siginfo_fixup;
5274}
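
/* Editor's sketch (illustrative, not part of the original file): the
   shape a registered fixup callback is expected to have.  Everything
   named "foo" is a hypothetical placeholder; DIRECTION is assumed to
   be 0 when converting the native (ptrace) object into the inferior's
   layout and nonzero for the reverse, and returning 0 is assumed to
   tell the caller to fall back to a plain copy.  */

#if 0	/* Editor's sketch only.  */
static int
foo_linux_siginfo_fixup (struct siginfo *native, gdb_byte *inf, int direction)
{
  if (!foo_inferior_needs_conversion ())
    return 0;			/* Native and inferior layouts match.  */

  if (direction == 0)
    foo_siginfo_to_inferior_layout (inf, native);
  else
    foo_siginfo_from_inferior_layout (native, inf);

  return 1;			/* Conversion handled here.  */
}
#endif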
5275
7b50312a
PA
5276/* Register a method to call prior to resuming a thread. */
5277
5278void
5279linux_nat_set_prepare_to_resume (struct target_ops *t,
5280 void (*prepare_to_resume) (struct lwp_info *))
5281{
5282 /* Save the pointer. */
5283 linux_nat_prepare_to_resume = prepare_to_resume;
5284}
5285
9f0bdab8
DJ
5286/* Return the saved siginfo associated with PTID. */
5287struct siginfo *
5288linux_nat_get_siginfo (ptid_t ptid)
5289{
5290 struct lwp_info *lp = find_lwp_pid (ptid);
5291
5292 gdb_assert (lp != NULL);
5293
5294 return &lp->siginfo;
5295}
5296
2c0b251b
PA
5297/* Provide a prototype to silence -Wmissing-prototypes. */
5298extern initialize_file_ftype _initialize_linux_nat;
5299
d6b0e80f
AC
5300void
5301_initialize_linux_nat (void)
5302{
b84876c2
PA
5303 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5304 &debug_linux_nat, _("\
5305Set debugging of GNU/Linux lwp module."), _("\
5306Show debugging of GNU/Linux lwp module."), _("\
5307Enables printf debugging output."),
5308 NULL,
5309 show_debug_linux_nat,
5310 &setdebuglist, &showdebuglist);
5311
b84876c2 5312 /* Save this mask as the default. */
d6b0e80f
AC
5313 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5314
7feb7d06
PA
5315 /* Install a SIGCHLD handler. */
5316 sigchld_action.sa_handler = sigchld_handler;
5317 sigemptyset (&sigchld_action.sa_mask);
5318 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5319
5320 /* Make it the default. */
7feb7d06 5321 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5322
5323 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5324 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5325 sigdelset (&suspend_mask, SIGCHLD);
5326
7feb7d06 5327 sigemptyset (&blocked_mask);
d6b0e80f
AC
5328}
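
/* Editor's note (illustrative): the knob registered above is driven
   from the GDB prompt as

     (gdb) set debug lin-lwp 1
     (gdb) show debug lin-lwp

   which enables the tracing output guarded by debug_linux_nat
   throughout this file.  */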
5329\f
5330
5331/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5332 the GNU/Linux Threads library and therefore doesn't really belong
5333 here. */
5334
5335/* Read variable NAME in the target and return its value if found.
5336 Otherwise return zero. It is assumed that the type of the variable
5337 is `int'. */
5338
5339static int
5340get_signo (const char *name)
5341{
5342 struct minimal_symbol *ms;
5343 int signo;
5344
5345 ms = lookup_minimal_symbol (name, NULL, NULL);
5346 if (ms == NULL)
5347 return 0;
5348
8e70166d 5349 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5350 sizeof (signo)) != 0)
5351 return 0;
5352
5353 return signo;
5354}
5355
5356/* Return the set of signals used by the threads library in *SET. */
5357
5358void
5359lin_thread_get_thread_signals (sigset_t *set)
5360{
5361 struct sigaction action;
5362 int restart, cancel;
5363
b84876c2 5364 sigemptyset (&blocked_mask);
d6b0e80f
AC
5365 sigemptyset (set);
5366
5367 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5368 cancel = get_signo ("__pthread_sig_cancel");
5369
5370 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5371 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5372 not provide any way for the debugger to query the signal numbers -
5373 fortunately they don't change! */
5374
d6b0e80f 5375 if (restart == 0)
17fbb0bd 5376 restart = __SIGRTMIN;
d6b0e80f 5377
d6b0e80f 5378 if (cancel == 0)
17fbb0bd 5379 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5380
5381 sigaddset (set, restart);
5382 sigaddset (set, cancel);
5383
5384 /* The GNU/Linux Threads library makes terminating threads send a
5385 special "cancel" signal instead of SIGCHLD. Make sure we catch
5386 those (to prevent them from terminating GDB itself, which is
5387 likely to be their default action) and treat them the same way as
5388 SIGCHLD. */
5389
5390 action.sa_handler = sigchld_handler;
5391 sigemptyset (&action.sa_mask);
58aecb61 5392 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5393 sigaction (cancel, &action, NULL);
5394
5395 /* We block the "cancel" signal throughout this code ... */
5396 sigaddset (&blocked_mask, cancel);
5397 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5398
5399 /* ... except during a sigsuspend. */
5400 sigdelset (&suspend_mask, cancel);
5401}