/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>  /* for MAXPATHLEN */
#include <sys/procfs.h>  /* for elf_gregset etc.  */
#include "elf-bfd.h"  /* for elfcore_write_* */
#include "gregset.h"  /* for gregset */
#include "gdbcore.h"  /* for get_exec_file */
#include <ctype.h>  /* for isdigit */
#include "gdbthread.h"  /* for struct thread_info etc.  */
#include "gdb_stat.h"  /* for struct stat */
#include <fcntl.h>  /* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
#include "linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior to
version 2.4, Linux can either wait for event in main thread, or in secondary
threads.  (2.4 has the __WALL flag).  So, if we use blocking waitpid, we might
miss an event.  The solution is to use non-blocking waitpid, together with
sigsuspend.  First, we use non-blocking waitpid to get an event in the main
process, if any.  Second, we use non-blocking waitpid with the __WCLONE
flag to check for events in cloned processes.  If nothing is found, we use
sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means something
happened to a child process -- and SIGCHLD will be delivered both for events
in main debugged process and in cloned processes.  As soon as we know there's
an event, we get back to calling nonblocking waitpid with and without
__WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop if there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
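
/* A minimal sketch of the sync-mode scheme described above; this is an
   illustration only, not the code this file actually uses.  Assuming
   SIGCHLD is blocked, STATUS is an int, and UNBLOCKED is a signal mask
   with SIGCHLD unblocked:

       for (;;)
         {
           if (waitpid (-1, &status, WNOHANG) > 0)
             break;                 (event in the main process)
           if (waitpid (-1, &status, WNOHANG | __WCLONE) > 0)
             break;                 (event in a cloned process)
           sigsuspend (&unblocked);
         }

   The real code below adds per-LWP bookkeeping, ptrace options, and
   error handling, but follows this shape.  */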

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the current used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}
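
/* As the comment at the top of this file notes, it is the SIGCHLD
   handler that writes to this pipe.  A minimal sketch of such a
   handler (an illustration only; the handler actually installed
   elsewhere in this file may do more, e.g. preserve errno):

       static void
       example_sigchld_handler (int signo)
       {
         if (signo == SIGCHLD)
           async_file_mark ();
       }  */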

static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);


/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}


/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
           status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          restore_child_signals_mask (&prev_mask);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed "
                   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected "
                   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: "
                       "failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}


void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
        {
          struct cleanup *old_chain;

          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* keep breakpoints list in sync.  */
              remove_breakpoints_pid (GET_PID (inferior_ptid));
            }

          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from "
                                "child process %d.\n",
                                child_pid);
            }

          old_chain = save_inferior_ptid ();
          inferior_ptid = ptid_build (child_pid, child_pid, 0);

          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->last_resume_kind = resume_stop;
          make_cleanup (delete_lwp_cleanup, child_lp);

          /* CHILD_LP has new PID, therefore linux_nat_new_thread is not called for it.
             See i386_inferior_data_get for the Linux kernel specifics.
             Ensure linux_nat_prepare_to_resume will reset the hardware debug
             registers.  It is done by the linux_nat_new_thread call, which is
             being skipped in add_lwp above for the first lwp of a pid.  */
          gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
          if (linux_nat_new_thread != NULL)
            linux_nat_new_thread (child_lp);

          if (linux_nat_prepare_to_resume != NULL)
            linux_nat_prepare_to_resume (child_lp);
          ptrace (PTRACE_DETACH, child_pid, 0, 0);

          do_cleanups (old_chain);
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_pid);

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = ptid_build (child_pid, child_pid, 0);
          add_thread (inferior_ptid);
          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->last_resume_kind = resume_stop;
          child_inf->symfile_flags = SYMFILE_NO_READ;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (solib-svr4) learn about
                 this new process, relocate the cloned exec, pull in
                 shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct lwp_info *parent_lp;
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when
             the child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
          gdb_assert (linux_supports_tracefork_flag >= 0);

          if (linux_supports_tracevforkdone (0))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);
              parent_lp->stopped = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypasses actually
                 resuming the inferior.  */
              parent_lp->status = 0;
              parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              parent_lp->stopped = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_can_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          if (has_vforked)
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "vfork to child process %d.\n"),
                              parent_pid, child_pid);
          else
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "fork to child process %d.\n"),
                              parent_pid, child_pid);
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (solib-svr4) learn about
             this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}


static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;


/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = target_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}


/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);


/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
                  strsignal (SIGTRAP));
      else
        snprintf (buf, sizeof (buf), "%s (stopped)",
                  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          lwp_free (lp);
        }
      else
        lpprev = lp;
    }
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  Don't do this for the first
     thread though.  If we're spawning a child ("run"), the thread
     executes the shell wrapper first, and we shouldn't touch it until
     it execs the program we want to debug.  For "attach", it'd be
     okay to call the callback, but it's not necessary, because
     watchpoints can't yet have been inserted into the inferior.  */
  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   int (*callback) (struct lwp_info *, void *),
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data))
            return lp;
        }
    }

  return NULL;
}

JK
1277/* Iterate like iterate_over_lwps does except when forking-off a child call
1278 CALLBACK with CALLBACK_DATA specifically only for that new child PID. */
1279
1280void
1281linux_nat_iterate_watchpoint_lwps
1282 (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
1283{
1284 int inferior_pid = ptid_get_pid (inferior_ptid);
1285 struct inferior *inf = current_inferior ();
1286
1287 if (inf->pid == inferior_pid)
1288 {
1289 /* Iterate all the threads of the current inferior. Without specifying
1290 INFERIOR_PID it would iterate all threads of all inferiors, which is
1291 inappropriate for watchpoints. */
1292
1293 iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
1294 }
1295 else
1296 {
1297 /* Detaching a new child PID temporarily present in INFERIOR_PID. */
1298
1299 struct lwp_info *child_lp;
1300 struct cleanup *old_chain;
1301 pid_t child_pid = GET_PID (inferior_ptid);
1302 ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);
1303
1304 gdb_assert (!is_lwp (inferior_ptid));
1305 gdb_assert (find_lwp_pid (child_ptid) == NULL);
1306 child_lp = add_lwp (child_ptid);
1307 child_lp->stopped = 1;
1308 child_lp->last_resume_kind = resume_stop;
1309 old_chain = make_cleanup (delete_lwp_cleanup, child_lp);
1310
1311 callback (child_lp, callback_data);
1312
1313 do_cleanups (old_chain);
1314 }
1315}
1316
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
                            pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

84636d28
PA
1434/* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1435 the new LWP could not be attached, or 1 if we're already auto
1436 attached to this thread, but haven't processed the
1437 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1438 its existance, without considering it an error. */
d6b0e80f 1439
9ee57c33 1440int
93815fbf 1441lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1442{
9ee57c33 1443 struct lwp_info *lp;
7feb7d06 1444 sigset_t prev_mask;
84636d28 1445 int lwpid;
d6b0e80f
AC
1446
1447 gdb_assert (is_lwp (ptid));
1448
7feb7d06 1449 block_child_signals (&prev_mask);
d6b0e80f 1450
9ee57c33 1451 lp = find_lwp_pid (ptid);
84636d28 1452 lwpid = GET_LWP (ptid);
d6b0e80f
AC
1453
1454 /* We assume that we're already attached to any LWP that has an id
1455 equal to the overall process id, and to any LWP that is already
1456 in our list of LWPs. If we're not seeing exit events from threads
1457 and we've had PID wraparound since we last tried to stop all threads,
1458 this assumption might be wrong; fortunately, this is very unlikely
1459 to happen. */
84636d28 1460 if (lwpid != GET_PID (ptid) && lp == NULL)
d6b0e80f 1461 {
a0ef4274 1462 int status, cloned = 0, signalled = 0;
d6b0e80f 1463
84636d28 1464 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
9ee57c33 1465 {
84636d28
PA
1466 if (linux_supports_tracefork_flag)
1467 {
1468 /* If we haven't stopped all threads when we get here,
1469 we may have seen a thread listed in thread_db's list,
1470 but not processed the PTRACE_EVENT_CLONE yet. If
1471 that's the case, ignore this new thread, and let
1472 normal event handling discover it later. */
1473 if (in_pid_list_p (stopped_pids, lwpid))
1474 {
1475 /* We've already seen this thread stop, but we
1476 haven't seen the PTRACE_EVENT_CLONE extended
1477 event yet. */
1478 restore_child_signals_mask (&prev_mask);
1479 return 0;
1480 }
1481 else
1482 {
1483 int new_pid;
1484 int status;
1485
1486 /* See if we've got a stop for this new child
1487 pending. If so, we're already attached. */
1488 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1489 if (new_pid == -1 && errno == ECHILD)
1490 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1491 if (new_pid != -1)
1492 {
1493 if (WIFSTOPPED (status))
1494 add_to_pid_list (&stopped_pids, lwpid, status);
1495
1496 restore_child_signals_mask (&prev_mask);
1497 return 1;
1498 }
1499 }
1500 }
1501
9ee57c33
DJ
1502 /* If we fail to attach to the thread, issue a warning,
1503 but continue. One way this can happen is if thread
e9efe249 1504 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1505 bug may place threads in the thread list and then fail
1506 to create them. */
1507 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1508 safe_strerror (errno));
7feb7d06 1509 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1510 return -1;
1511 }
1512
d6b0e80f
AC
1513 if (debug_linux_nat)
1514 fprintf_unfiltered (gdb_stdlog,
1515 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1516 target_pid_to_str (ptid));
1517
a0ef4274 1518 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1519 if (!WIFSTOPPED (status))
673c2bbe
DE
1520 {
1521 restore_child_signals_mask (&prev_mask);
f687d035 1522 return 1;
673c2bbe 1523 }
dacc9cb2 1524
a0ef4274
DJ
1525 lp = add_lwp (ptid);
1526 lp->stopped = 1;
1527 lp->cloned = cloned;
1528 lp->signalled = signalled;
1529 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1530 {
a0ef4274
DJ
1531 lp->resumed = 1;
1532 lp->status = status;
d6b0e80f
AC
1533 }
1534
a0ef4274 1535 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1536
1537 if (debug_linux_nat)
1538 {
1539 fprintf_unfiltered (gdb_stdlog,
1540 "LLAL: waitpid %s received %s\n",
1541 target_pid_to_str (ptid),
1542 status_to_str (status));
1543 }
1544 }
1545 else
1546 {
1547 /* We assume that the LWP representing the original process is
1548 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1549 that the GNU/linux ptrace layer uses to keep track of
1550 threads. Note that this won't have already been done since
1551 the main thread will have, we assume, been stopped by an
1552 attach from a different layer. */
9ee57c33
DJ
1553 if (lp == NULL)
1554 lp = add_lwp (ptid);
d6b0e80f
AC
1555 lp->stopped = 1;
1556 }
9ee57c33 1557
25289eb2 1558 lp->last_resume_kind = resume_stop;
7feb7d06 1559 restore_child_signals_mask (&prev_mask);
9ee57c33 1560 return 0;
d6b0e80f
AC
1561}
1562
b84876c2 1563static void
136d6dae
VP
1564linux_nat_create_inferior (struct target_ops *ops,
1565 char *exec_file, char *allargs, char **env,
b84876c2
PA
1566 int from_tty)
1567{
10568435
JK
1568#ifdef HAVE_PERSONALITY
1569 int personality_orig = 0, personality_set = 0;
1570#endif /* HAVE_PERSONALITY */
b84876c2
PA
1571
1572 /* The fork_child mechanism is synchronous and calls target_wait, so
1573 we have to mask the async mode. */
1574
10568435
JK
1575#ifdef HAVE_PERSONALITY
1576 if (disable_randomization)
1577 {
1578 errno = 0;
1579 personality_orig = personality (0xffffffff);
1580 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1581 {
1582 personality_set = 1;
1583 personality (personality_orig | ADDR_NO_RANDOMIZE);
1584 }
1585 if (errno != 0 || (personality_set
1586 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1587 warning (_("Error disabling address space randomization: %s"),
1588 safe_strerror (errno));
1589 }
1590#endif /* HAVE_PERSONALITY */
1591
2455069d
UW
1592 /* Make sure we report all signals during startup. */
1593 linux_nat_pass_signals (0, NULL);
1594
136d6dae 1595 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1596
10568435
JK
1597#ifdef HAVE_PERSONALITY
1598 if (personality_set)
1599 {
1600 errno = 0;
1601 personality (personality_orig);
1602 if (errno != 0)
1603 warning (_("Error restoring address space randomization: %s"),
1604 safe_strerror (errno));
1605 }
1606#endif /* HAVE_PERSONALITY */
b84876c2
PA
1607}
1608
d6b0e80f 1609static void
136d6dae 1610linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1611{
1612 struct lwp_info *lp;
d6b0e80f 1613 int status;
af990527 1614 ptid_t ptid;
d6b0e80f 1615
2455069d
UW
1616 /* Make sure we report all signals during attach. */
1617 linux_nat_pass_signals (0, NULL);
1618
136d6dae 1619 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1620
af990527
PA
1621 /* The ptrace base target adds the main thread with (pid,0,0)
1622 format. Decorate it with lwp info. */
1623 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1624 thread_change_ptid (inferior_ptid, ptid);
1625
9f0bdab8 1626 /* Add the initial process as the first LWP to the list. */
af990527 1627 lp = add_lwp (ptid);
a0ef4274
DJ
1628
1629 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1630 &lp->signalled);
dacc9cb2
PP
1631 if (!WIFSTOPPED (status))
1632 {
1633 if (WIFEXITED (status))
1634 {
1635 int exit_code = WEXITSTATUS (status);
1636
1637 target_terminal_ours ();
1638 target_mourn_inferior ();
1639 if (exit_code == 0)
1640 error (_("Unable to attach: program exited normally."));
1641 else
1642 error (_("Unable to attach: program exited with code %d."),
1643 exit_code);
1644 }
1645 else if (WIFSIGNALED (status))
1646 {
1647 enum target_signal signo;
1648
1649 target_terminal_ours ();
1650 target_mourn_inferior ();
1651
1652 signo = target_signal_from_host (WTERMSIG (status));
1653 error (_("Unable to attach: program terminated with signal "
1654 "%s, %s."),
1655 target_signal_to_name (signo),
1656 target_signal_to_string (signo));
1657 }
1658
1659 internal_error (__FILE__, __LINE__,
1660 _("unexpected status %d for PID %ld"),
1661 status, (long) GET_LWP (ptid));
1662 }
1663
a0ef4274 1664 lp->stopped = 1;
9f0bdab8 1665
a0ef4274 1666 /* Save the wait status to report later. */
d6b0e80f 1667 lp->resumed = 1;
a0ef4274
DJ
1668 if (debug_linux_nat)
1669 fprintf_unfiltered (gdb_stdlog,
1670 "LNA: waitpid %ld, saving status %s\n",
1671 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1672
7feb7d06
PA
1673 lp->status = status;
1674
1675 if (target_can_async_p ())
1676 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1677}
1678
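/* Illustrative sketch, not part of linux-nat.c: the waitpid status
   decoding that linux_nat_attach performs above when the initial stop
   does not look like a SIGSTOP.  For a status returned by waitpid on a
   traced child, exactly one of WIFSTOPPED, WIFEXITED and WIFSIGNALED
   is true (leaving WIFCONTINUED aside).  demo_describe_status is a
   hypothetical helper, not a GDB function.  */

#include <stdio.h>
#include <sys/wait.h>

static void
demo_describe_status (int status)
{
  if (WIFSTOPPED (status))
    printf ("stopped by signal %d\n", WSTOPSIG (status));
  else if (WIFEXITED (status))
    printf ("exited with code %d\n", WEXITSTATUS (status));
  else if (WIFSIGNALED (status))
    printf ("terminated by signal %d\n", WTERMSIG (status));
}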
a0ef4274
DJ
1679/* Get pending status of LP. */
1680static int
1681get_pending_status (struct lwp_info *lp, int *status)
1682{
ca2163eb
PA
1683 enum target_signal signo = TARGET_SIGNAL_0;
1684
1685 /* If we paused threads momentarily, we may have stored pending
1686 events in lp->status or lp->waitstatus (see stop_wait_callback),
1687 and GDB core hasn't seen any signal for those threads.
1688 Otherwise, the last signal reported to the core is found in the
1689 thread object's stop_signal.
1690
1691 There's a corner case that isn't handled here at present. Only
1692 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1693 stop_signal make sense as a real signal to pass to the inferior.
1694 Some catchpoint related events, like
1695 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1696 to TARGET_SIGNAL_TRAP when the catchpoint triggers. But,
1697 those traps are debug API (ptrace in our case) related and
1698 induced; the inferior wouldn't see them if it wasn't being
1699 traced. Hence, we should never pass them to the inferior, even
1700 when set to pass state. Since this corner case isn't handled by
1701 infrun.c when proceeding with a signal, for consistency, neither
1702 do we handle it here (or elsewhere in the file we check for
1703 signal pass state). Normally SIGTRAP isn't set to pass state, so
1704 this is really a corner case. */
1705
1706 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1707 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1708 else if (lp->status)
1709 signo = target_signal_from_host (WSTOPSIG (lp->status));
1710 else if (non_stop && !is_executing (lp->ptid))
1711 {
1712 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1713
16c381f0 1714 signo = tp->suspend.stop_signal;
ca2163eb
PA
1715 }
1716 else if (!non_stop)
a0ef4274 1717 {
ca2163eb
PA
1718 struct target_waitstatus last;
1719 ptid_t last_ptid;
4c28f408 1720
ca2163eb 1721 get_last_target_status (&last_ptid, &last);
4c28f408 1722
ca2163eb
PA
1723 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1724 {
e09875d4 1725 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1726
16c381f0 1727 signo = tp->suspend.stop_signal;
4c28f408 1728 }
ca2163eb 1729 }
4c28f408 1730
ca2163eb 1731 *status = 0;
4c28f408 1732
ca2163eb
PA
1733 if (signo == TARGET_SIGNAL_0)
1734 {
1735 if (debug_linux_nat)
1736 fprintf_unfiltered (gdb_stdlog,
1737 "GPT: lwp %s has no pending signal\n",
1738 target_pid_to_str (lp->ptid));
1739 }
1740 else if (!signal_pass_state (signo))
1741 {
1742 if (debug_linux_nat)
3e43a32a
MS
1743 fprintf_unfiltered (gdb_stdlog,
1744 "GPT: lwp %s had signal %s, "
1745 "but it is in no pass state\n",
ca2163eb
PA
1746 target_pid_to_str (lp->ptid),
1747 target_signal_to_string (signo));
a0ef4274 1748 }
a0ef4274 1749 else
4c28f408 1750 {
ca2163eb
PA
1751 *status = W_STOPCODE (target_signal_to_host (signo));
1752
1753 if (debug_linux_nat)
1754 fprintf_unfiltered (gdb_stdlog,
1755 "GPT: lwp %s has pending signal %s\n",
1756 target_pid_to_str (lp->ptid),
1757 target_signal_to_string (signo));
4c28f408 1758 }
a0ef4274
DJ
1759
1760 return 0;
1761}
1762
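/* Illustrative sketch, not part of linux-nat.c: the W_STOPCODE round
   trip that get_pending_status relies on above.  W_STOPCODE(sig)
   synthesizes a "stopped by SIG" wait status, so WIFSTOPPED is true
   and WSTOPSIG recovers the signal number.  The #ifndef fallback is a
   common definition for systems whose <sys/wait.h> does not provide
   the macro; demo_w_stopcode is a hypothetical helper, not a GDB
   function.  */

#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

static void
demo_w_stopcode (void)
{
  int status = W_STOPCODE (SIGINT);

  assert (WIFSTOPPED (status));
  assert (WSTOPSIG (status) == SIGINT);
}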
d6b0e80f
AC
1763static int
1764detach_callback (struct lwp_info *lp, void *data)
1765{
1766 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1767
1768 if (debug_linux_nat && lp->status)
1769 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1770 strsignal (WSTOPSIG (lp->status)),
1771 target_pid_to_str (lp->ptid));
1772
a0ef4274
DJ
1773 /* If there is a pending SIGSTOP, get rid of it. */
1774 if (lp->signalled)
d6b0e80f 1775 {
d6b0e80f
AC
1776 if (debug_linux_nat)
1777 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1778 "DC: Sending SIGCONT to %s\n",
1779 target_pid_to_str (lp->ptid));
d6b0e80f 1780
a0ef4274 1781 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1782 lp->signalled = 0;
d6b0e80f
AC
1783 }
1784
1785 /* We don't actually detach from the LWP that has an id equal to the
1786 overall process id just yet. */
1787 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1788 {
a0ef4274
DJ
1789 int status = 0;
1790
1791 /* Pass on any pending signal for this LWP. */
1792 get_pending_status (lp, &status);
1793
7b50312a
PA
1794 if (linux_nat_prepare_to_resume != NULL)
1795 linux_nat_prepare_to_resume (lp);
d6b0e80f
AC
1796 errno = 0;
1797 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1798 WSTOPSIG (status)) < 0)
8a3fe4f8 1799 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1800 safe_strerror (errno));
1801
1802 if (debug_linux_nat)
1803 fprintf_unfiltered (gdb_stdlog,
1804 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1805 target_pid_to_str (lp->ptid),
7feb7d06 1806 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1807
1808 delete_lwp (lp->ptid);
1809 }
1810
1811 return 0;
1812}
1813
1814static void
136d6dae 1815linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1816{
b84876c2 1817 int pid;
a0ef4274 1818 int status;
d90e17a7
PA
1819 struct lwp_info *main_lwp;
1820
1821 pid = GET_PID (inferior_ptid);
a0ef4274 1822
b84876c2
PA
1823 if (target_can_async_p ())
1824 linux_nat_async (NULL, 0);
1825
4c28f408
PA
1826 /* Stop all threads before detaching. ptrace requires that the
1827 thread is stopped to successfully detach. */
d90e17a7 1828 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1829 /* ... and wait until all of them have reported back that
1830 they're no longer running. */
d90e17a7 1831 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1832
d90e17a7 1833 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1834
1835 /* Only the initial process should be left right now. */
d90e17a7
PA
1836 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1837
1838 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1839
a0ef4274
DJ
1840 /* Pass on any pending signal for the last LWP. */
1841 if ((args == NULL || *args == '\0')
d90e17a7 1842 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1843 && WIFSTOPPED (status))
1844 {
1845 /* Put the signal number in ARGS so that inf_ptrace_detach will
1846 pass it along with PTRACE_DETACH. */
1847 args = alloca (8);
1848 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1849 if (debug_linux_nat)
1850 fprintf_unfiltered (gdb_stdlog,
1851 "LND: Sending signal %s to %s\n",
1852 args,
1853 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1854 }
1855
7b50312a
PA
1856 if (linux_nat_prepare_to_resume != NULL)
1857 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1858 delete_lwp (main_lwp->ptid);
b84876c2 1859
7a7d3353
PA
1860 if (forks_exist_p ())
1861 {
1862 /* Multi-fork case. The current inferior_ptid is being detached
1863 from, but there are other viable forks to debug. Detach from
1864 the current fork, and context-switch to the first
1865 available. */
1866 linux_fork_detach (args, from_tty);
1867
1868 if (non_stop && target_can_async_p ())
1869 target_async (inferior_event_handler, 0);
1870 }
1871 else
1872 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1873}
1874
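/* Illustrative sketch, not part of linux-nat.c: what the ARGS
   juggling in linux_nat_detach above ultimately boils down to.  The
   last ptrace argument of PTRACE_DETACH is the signal the detached
   thread resumes with (0 for none).  demo_detach_with_signal is a
   hypothetical helper, not a GDB function.  */

#include <sys/ptrace.h>
#include <sys/types.h>

static long
demo_detach_with_signal (pid_t lwpid, int sig)
{
  return ptrace (PTRACE_DETACH, lwpid, (void *) 0, (void *) (long) sig);
}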
1875/* Resume LP. */
1876
25289eb2
PA
1877static void
1878resume_lwp (struct lwp_info *lp, int step)
d6b0e80f 1879{
25289eb2 1880 if (lp->stopped)
6c95b8df 1881 {
25289eb2
PA
1882 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1883
1884 if (inf->vfork_child != NULL)
1885 {
1886 if (debug_linux_nat)
1887 fprintf_unfiltered (gdb_stdlog,
1888 "RC: Not resuming %s (vfork parent)\n",
1889 target_pid_to_str (lp->ptid));
1890 }
1891 else if (lp->status == 0
1892 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1893 {
1894 if (debug_linux_nat)
1895 fprintf_unfiltered (gdb_stdlog,
1896 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1897 target_pid_to_str (lp->ptid));
1898
7b50312a
PA
1899 if (linux_nat_prepare_to_resume != NULL)
1900 linux_nat_prepare_to_resume (lp);
25289eb2
PA
1901 linux_ops->to_resume (linux_ops,
1902 pid_to_ptid (GET_LWP (lp->ptid)),
1903 step, TARGET_SIGNAL_0);
25289eb2
PA
1904 lp->stopped = 0;
1905 lp->step = step;
1906 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1907 lp->stopped_by_watchpoint = 0;
1908 }
1909 else
1910 {
1911 if (debug_linux_nat)
1912 fprintf_unfiltered (gdb_stdlog,
1913 "RC: Not resuming sibling %s (has pending)\n",
1914 target_pid_to_str (lp->ptid));
1915 }
6c95b8df 1916 }
25289eb2 1917 else
d6b0e80f 1918 {
d90e17a7
PA
1919 if (debug_linux_nat)
1920 fprintf_unfiltered (gdb_stdlog,
25289eb2 1921 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1922 target_pid_to_str (lp->ptid));
d6b0e80f 1923 }
25289eb2 1924}
d6b0e80f 1925
25289eb2
PA
1926static int
1927resume_callback (struct lwp_info *lp, void *data)
1928{
1929 resume_lwp (lp, 0);
d6b0e80f
AC
1930 return 0;
1931}
1932
1933static int
1934resume_clear_callback (struct lwp_info *lp, void *data)
1935{
1936 lp->resumed = 0;
25289eb2 1937 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1938 return 0;
1939}
1940
1941static int
1942resume_set_callback (struct lwp_info *lp, void *data)
1943{
1944 lp->resumed = 1;
25289eb2 1945 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1946 return 0;
1947}
1948
1949static void
28439f5e
PA
1950linux_nat_resume (struct target_ops *ops,
1951 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1952{
7feb7d06 1953 sigset_t prev_mask;
d6b0e80f 1954 struct lwp_info *lp;
d90e17a7 1955 int resume_many;
d6b0e80f 1956
76f50ad1
DJ
1957 if (debug_linux_nat)
1958 fprintf_unfiltered (gdb_stdlog,
1959 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1960 step ? "step" : "resume",
1961 target_pid_to_str (ptid),
423ec54c
JK
1962 (signo != TARGET_SIGNAL_0
1963 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1964 target_pid_to_str (inferior_ptid));
1965
7feb7d06 1966 block_child_signals (&prev_mask);
b84876c2 1967
d6b0e80f 1968 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1969 resume_many = (ptid_equal (minus_one_ptid, ptid)
1970 || ptid_is_pid (ptid));
4c28f408 1971
e3e9f5a2
PA
1972 /* Mark the lwps we're resuming as resumed. */
1973 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1974
d90e17a7
PA
1975 /* See if it's the current inferior that should be handled
1976 specially. */
1977 if (resume_many)
1978 lp = find_lwp_pid (inferior_ptid);
1979 else
1980 lp = find_lwp_pid (ptid);
9f0bdab8 1981 gdb_assert (lp != NULL);
d6b0e80f 1982
9f0bdab8
DJ
1983 /* Remember if we're stepping. */
1984 lp->step = step;
25289eb2 1985 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1986
9f0bdab8
DJ
1987 /* If we have a pending wait status for this thread, there is no
1988 point in resuming the process. But first make sure that
1989 linux_nat_wait won't preemptively handle the event - we
1990 should never take this short-circuit if we are going to
1991 leave LP running, since we have skipped resuming all the
1992 other threads. This bit of code needs to be synchronized
1993 with linux_nat_wait. */
76f50ad1 1994
9f0bdab8
DJ
1995 if (lp->status && WIFSTOPPED (lp->status))
1996 {
2455069d
UW
1997 if (!lp->step
1998 && WSTOPSIG (lp->status)
1999 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 2000 {
9f0bdab8
DJ
2001 if (debug_linux_nat)
2002 fprintf_unfiltered (gdb_stdlog,
2003 "LLR: Not short circuiting for ignored "
2004 "status 0x%x\n", lp->status);
2005
d6b0e80f
AC
2006 /* FIXME: What should we do if we are supposed to continue
2007 this thread with a signal? */
2008 gdb_assert (signo == TARGET_SIGNAL_0);
2455069d 2009 signo = target_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
2010 lp->status = 0;
2011 }
2012 }
76f50ad1 2013
6c95b8df 2014 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
2015 {
2016 /* FIXME: What should we do if we are supposed to continue
2017 this thread with a signal? */
2018 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 2019
9f0bdab8
DJ
2020 if (debug_linux_nat)
2021 fprintf_unfiltered (gdb_stdlog,
2022 "LLR: Short circuiting for status 0x%x\n",
2023 lp->status);
d6b0e80f 2024
7feb7d06
PA
2025 restore_child_signals_mask (&prev_mask);
2026 if (target_can_async_p ())
2027 {
2028 target_async (inferior_event_handler, 0);
2029 /* Tell the event loop we have something to process. */
2030 async_file_mark ();
2031 }
9f0bdab8 2032 return;
d6b0e80f
AC
2033 }
2034
9f0bdab8
DJ
2035 /* Mark LWP as not stopped to prevent it from being continued by
2036 resume_callback. */
2037 lp->stopped = 0;
2038
d90e17a7
PA
2039 if (resume_many)
2040 iterate_over_lwps (ptid, resume_callback, NULL);
2041
2042 /* Convert to something the lower layer understands. */
2043 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 2044
7b50312a
PA
2045 if (linux_nat_prepare_to_resume != NULL)
2046 linux_nat_prepare_to_resume (lp);
28439f5e 2047 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 2048 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 2049 lp->stopped_by_watchpoint = 0;
9f0bdab8 2050
d6b0e80f
AC
2051 if (debug_linux_nat)
2052 fprintf_unfiltered (gdb_stdlog,
2053 "LLR: %s %s, %s (resume event thread)\n",
2054 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2055 target_pid_to_str (ptid),
423ec54c
JK
2056 (signo != TARGET_SIGNAL_0
2057 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 2058
7feb7d06 2059 restore_child_signals_mask (&prev_mask);
b84876c2 2060 if (target_can_async_p ())
8ea051c5 2061 target_async (inferior_event_handler, 0);
d6b0e80f
AC
2062}
2063
c5f62d5f 2064/* Send a signal to an LWP. */
d6b0e80f
AC
2065
2066static int
2067kill_lwp (int lwpid, int signo)
2068{
c5f62d5f
DE
2069 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2070 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2071
2072#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2073 {
2074 static int tkill_failed;
2075
2076 if (!tkill_failed)
2077 {
2078 int ret;
2079
2080 errno = 0;
2081 ret = syscall (__NR_tkill, lwpid, signo);
2082 if (errno != ENOSYS)
2083 return ret;
2084 tkill_failed = 1;
2085 }
2086 }
d6b0e80f
AC
2087#endif
2088
2089 return kill (lwpid, signo);
2090}
2091
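/* Illustrative sketch, not part of linux-nat.c: the raw tkill call
   that kill_lwp above prefers.  tkill has no glibc wrapper, hence
   syscall(); unlike kill(), it directs the signal at one specific
   thread rather than at the whole thread group.  demo_tkill is a
   hypothetical helper, not a GDB function.  */

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
demo_tkill (pid_t lwpid, int signo)
{
  return syscall (SYS_tkill, lwpid, signo);
}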
ca2163eb
PA
2092/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2093 event, check if the core is interested in it: if not, ignore the
2094 event, and keep waiting; otherwise, we need to toggle the LWP's
2095 syscall entry/exit status, since the ptrace event itself doesn't
2096 indicate it, and report the trap to higher layers. */
2097
2098static int
2099linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2100{
2101 struct target_waitstatus *ourstatus = &lp->waitstatus;
2102 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2103 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2104
2105 if (stopping)
2106 {
2107 /* If we're stopping threads, there's a SIGSTOP pending, which
2108 makes it so that the LWP reports an immediate syscall return,
2109 followed by the SIGSTOP. Skip seeing that "return" using
2110 PTRACE_CONT directly, and let stop_wait_callback collect the
2111 SIGSTOP. Later, when the thread is resumed, it will see a new
2112 syscall entry event. If we didn't do this (and returned 0), we'd
2113 leave a syscall entry pending, and our caller, by using
2114 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2115 itself. Later, when the user re-resumes this LWP, we'd see
2116 another syscall entry event and we'd mistake it for a return.
2117
2118 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2119 (leaving immediately with LWP->signalled set, without issuing
2120 a PTRACE_CONT), it would still be problematic to leave this
2121 syscall enter pending, as later when the thread is resumed,
2122 it would then see the same syscall exit mentioned above,
2123 followed by the delayed SIGSTOP, while the syscall didn't
2124 actually get to execute. It seems it would be even more
2125 confusing to the user. */
2126
2127 if (debug_linux_nat)
2128 fprintf_unfiltered (gdb_stdlog,
2129 "LHST: ignoring syscall %d "
2130 "for LWP %ld (stopping threads), "
2131 "resuming with PTRACE_CONT for SIGSTOP\n",
2132 syscall_number,
2133 GET_LWP (lp->ptid));
2134
2135 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2136 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2137 return 1;
2138 }
2139
2140 if (catch_syscall_enabled ())
2141 {
2142 /* Always update the entry/return state, even if this particular
2143 syscall isn't interesting to the core now. In async mode,
2144 the user could install a new catchpoint for this syscall
2145 between syscall enter/return, and we'll need to know to
2146 report a syscall return if that happens. */
2147 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2148 ? TARGET_WAITKIND_SYSCALL_RETURN
2149 : TARGET_WAITKIND_SYSCALL_ENTRY);
2150
2151 if (catching_syscall_number (syscall_number))
2152 {
2153 /* Alright, an event to report. */
2154 ourstatus->kind = lp->syscall_state;
2155 ourstatus->value.syscall_number = syscall_number;
2156
2157 if (debug_linux_nat)
2158 fprintf_unfiltered (gdb_stdlog,
2159 "LHST: stopping for %s of syscall %d"
2160 " for LWP %ld\n",
3e43a32a
MS
2161 lp->syscall_state
2162 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2163 ? "entry" : "return",
2164 syscall_number,
2165 GET_LWP (lp->ptid));
2166 return 0;
2167 }
2168
2169 if (debug_linux_nat)
2170 fprintf_unfiltered (gdb_stdlog,
2171 "LHST: ignoring %s of syscall %d "
2172 "for LWP %ld\n",
2173 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2174 ? "entry" : "return",
2175 syscall_number,
2176 GET_LWP (lp->ptid));
2177 }
2178 else
2179 {
2180 /* If we had been syscall tracing, and hence used PT_SYSCALL
2181 before on this LWP, it could happen that the user removes all
2182 syscall catchpoints before we get to process this event.
2183 There are two noteworthy issues here:
2184
2185 - When stopped at a syscall entry event, resuming with
2186 PT_STEP still resumes executing the syscall and reports a
2187 syscall return.
2188
2189 - Only PT_SYSCALL catches syscall enters. If we last
2190 single-stepped this thread, then this event can't be a
2191 syscall enter; given that the last resume was a single-step,
2192 it has to be a syscall exit.
2193
2194 The points above mean that the next resume, be it PT_STEP or
2195 PT_CONTINUE, can not trigger a syscall trace event. */
2196 if (debug_linux_nat)
2197 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2198 "LHST: caught syscall event "
2199 "with no syscall catchpoints."
ca2163eb
PA
2200 " %d for LWP %ld, ignoring\n",
2201 syscall_number,
2202 GET_LWP (lp->ptid));
2203 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2204 }
2205
2206 /* The core isn't interested in this event. For efficiency, avoid
2207 stopping all threads only to have the core resume them all again.
2208 Since we're not stopping threads, if we're still syscall tracing
2209 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2210 subsequent syscall. Simply resume using the inf-ptrace layer,
2211 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2212
2213 /* Note that gdbarch_get_syscall_number may access registers, hence
2214 fill a regcache. */
2215 registers_changed ();
7b50312a
PA
2216 if (linux_nat_prepare_to_resume != NULL)
2217 linux_nat_prepare_to_resume (lp);
ca2163eb
PA
2218 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2219 lp->step, TARGET_SIGNAL_0);
2220 return 1;
2221}
2222
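/* Illustrative sketch, not part of linux-nat.c: the entry/exit
   toggling that linux_handle_syscall_trap describes above.  A plain
   PTRACE_SYSCALL stop looks identical for a syscall entry and a
   syscall exit, so a tracer must remember which side it expects next,
   just as lp->syscall_state does.  The bookkeeping below is
   deliberately minimal (it can be off by one around the initial
   exec); demo_trace_syscalls is a hypothetical helper, not a GDB
   function.  */

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static void
demo_trace_syscalls (void)
{
  pid_t child;
  int status;
  int entry = 1;

  child = fork ();
  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      execlp ("true", "true", (char *) NULL);
      _exit (127);
    }

  waitpid (child, &status, 0);		/* Initial stop at the exec.  */
  for (;;)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);	/* Run to the next syscall stop.  */
      if (waitpid (child, &status, 0) < 0 || !WIFSTOPPED (status))
	break;
      printf ("syscall %s\n", entry ? "entry" : "exit");
      entry = !entry;
    }
}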
3d799a95
DJ
2223/* Handle a GNU/Linux extended wait response. If we see a clone
2224 event, we need to add the new LWP to our list (and not report the
2225 trap to higher layers). This function returns non-zero if the
2226 event should be ignored and we should wait again. If STOPPING is
2227 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2228
2229static int
3d799a95
DJ
2230linux_handle_extended_wait (struct lwp_info *lp, int status,
2231 int stopping)
d6b0e80f 2232{
3d799a95
DJ
2233 int pid = GET_LWP (lp->ptid);
2234 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2235 int event = status >> 16;
d6b0e80f 2236
3d799a95
DJ
2237 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2238 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2239 {
3d799a95
DJ
2240 unsigned long new_pid;
2241 int ret;
2242
2243 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2244
3d799a95
DJ
2245 /* If we haven't already seen the new PID stop, wait for it now. */
2246 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2247 {
2248 /* The new child has a pending SIGSTOP. We can't affect it until it
2249 hits the SIGSTOP, but we're already attached. */
2250 ret = my_waitpid (new_pid, &status,
2251 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2252 if (ret == -1)
2253 perror_with_name (_("waiting for new child"));
2254 else if (ret != new_pid)
2255 internal_error (__FILE__, __LINE__,
2256 _("wait returned unexpected PID %d"), ret);
2257 else if (!WIFSTOPPED (status))
2258 internal_error (__FILE__, __LINE__,
2259 _("wait returned unexpected status 0x%x"), status);
2260 }
2261
3a3e9ee3 2262 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2263
2277426b
PA
2264 if (event == PTRACE_EVENT_FORK
2265 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2266 {
2277426b
PA
2267 /* Handle checkpointing by linux-fork.c here as a special
2268 case. We don't want the follow-fork-mode or 'catch fork'
2269 to interfere with this. */
2270
2271 /* This won't actually modify the breakpoint list, but will
2272 physically remove the breakpoints from the child. */
2273 detach_breakpoints (new_pid);
2274
2275 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2276 if (!find_fork_pid (new_pid))
2277 add_fork (new_pid);
2277426b
PA
2278
2279 /* Report as spurious, so that infrun doesn't want to follow
2280 this fork. We're actually doing an infcall in
2281 linux-fork.c. */
2282 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2283 linux_enable_event_reporting (pid_to_ptid (new_pid));
2284
2285 /* Report the stop to the core. */
2286 return 0;
2287 }
2288
3d799a95
DJ
2289 if (event == PTRACE_EVENT_FORK)
2290 ourstatus->kind = TARGET_WAITKIND_FORKED;
2291 else if (event == PTRACE_EVENT_VFORK)
2292 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2293 else
3d799a95 2294 {
78768c4a
JK
2295 struct lwp_info *new_lp;
2296
3d799a95 2297 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2298
3c4d7e12
PA
2299 if (debug_linux_nat)
2300 fprintf_unfiltered (gdb_stdlog,
2301 "LHEW: Got clone event "
2302 "from LWP %d, new child is LWP %ld\n",
2303 pid, new_pid);
2304
d90e17a7 2305 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2306 new_lp->cloned = 1;
4c28f408 2307 new_lp->stopped = 1;
d6b0e80f 2308
3d799a95
DJ
2309 if (WSTOPSIG (status) != SIGSTOP)
2310 {
2311 /* This can happen if someone starts sending signals with a
2312 lower number than SIGSTOP (e.g. SIGUSR1) to the new thread
2313 before it gets a chance to run.
2314 This is an unlikely case, and harder to handle for
2315 fork / vfork than for clone, so we do not try - but
2316 we handle it for clone events here. We'll send
2317 the other signal on to the thread below. */
2318
2319 new_lp->signalled = 1;
2320 }
2321 else
79395f92
PA
2322 {
2323 struct thread_info *tp;
2324
2325 /* When we stop for an event in some other thread, and
2326 pull the thread list just as this thread has cloned,
2327 we'll have seen the new thread in the thread_db list
2328 before handling the CLONE event (glibc's
2329 pthread_create adds the new thread to the thread list
2330 before clone'ing, and has the kernel fill in the
2331 thread's tid on the clone call with
2332 CLONE_PARENT_SETTID). If that happened, and the core
2333 had requested the new thread to stop, we'll have
2334 killed it with SIGSTOP. But since SIGSTOP is not an
2335 RT signal, it can only be queued once. We need to be
2336 careful to not resume the LWP if we wanted it to
2337 stop. In that case, we'll leave the SIGSTOP pending.
2338 It will later be reported as TARGET_SIGNAL_0. */
2339 tp = find_thread_ptid (new_lp->ptid);
2340 if (tp != NULL && tp->stop_requested)
2341 new_lp->last_resume_kind = resume_stop;
2342 else
2343 status = 0;
2344 }
d6b0e80f 2345
4c28f408 2346 if (non_stop)
3d799a95 2347 {
4c28f408
PA
2348 /* Add the new thread to GDB's lists as soon as possible
2349 so that:
2350
2351 1) the frontend doesn't have to wait for a stop to
2352 display them, and,
2353
2354 2) we tag it with the correct running state. */
2355
2356 /* If the thread_db layer is active, let it know about
2357 this new thread, and add it to GDB's list. */
2358 if (!thread_db_attach_lwp (new_lp->ptid))
2359 {
2360 /* We're not using thread_db. Add it to GDB's
2361 list. */
2362 target_post_attach (GET_LWP (new_lp->ptid));
2363 add_thread (new_lp->ptid);
2364 }
2365
2366 if (!stopping)
2367 {
2368 set_running (new_lp->ptid, 1);
2369 set_executing (new_lp->ptid, 1);
e21ffe51
PA
2370 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2371 resume_stop. */
2372 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2373 }
2374 }
2375
79395f92
PA
2376 if (status != 0)
2377 {
2378 /* We created NEW_LP so it cannot yet contain STATUS. */
2379 gdb_assert (new_lp->status == 0);
2380
2381 /* Save the wait status to report later. */
2382 if (debug_linux_nat)
2383 fprintf_unfiltered (gdb_stdlog,
2384 "LHEW: waitpid of new LWP %ld, "
2385 "saving status %s\n",
2386 (long) GET_LWP (new_lp->ptid),
2387 status_to_str (status));
2388 new_lp->status = status;
2389 }
2390
ca2163eb
PA
2391 /* Note the need to use the low target ops to resume, to
2392 handle resuming with PT_SYSCALL if we have syscall
2393 catchpoints. */
4c28f408
PA
2394 if (!stopping)
2395 {
3d799a95 2396 new_lp->resumed = 1;
ca2163eb 2397
79395f92 2398 if (status == 0)
ad34eb2f 2399 {
e21ffe51 2400 gdb_assert (new_lp->last_resume_kind == resume_continue);
ad34eb2f
JK
2401 if (debug_linux_nat)
2402 fprintf_unfiltered (gdb_stdlog,
79395f92
PA
2403 "LHEW: resuming new LWP %ld\n",
2404 GET_LWP (new_lp->ptid));
7b50312a
PA
2405 if (linux_nat_prepare_to_resume != NULL)
2406 linux_nat_prepare_to_resume (new_lp);
79395f92
PA
2407 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2408 0, TARGET_SIGNAL_0);
2409 new_lp->stopped = 0;
ad34eb2f
JK
2410 }
2411 }
d6b0e80f 2412
3d799a95
DJ
2413 if (debug_linux_nat)
2414 fprintf_unfiltered (gdb_stdlog,
3c4d7e12 2415 "LHEW: resuming parent LWP %d\n", pid);
7b50312a
PA
2416 if (linux_nat_prepare_to_resume != NULL)
2417 linux_nat_prepare_to_resume (lp);
ca2163eb
PA
2418 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2419 0, TARGET_SIGNAL_0);
3d799a95
DJ
2420
2421 return 1;
2422 }
2423
2424 return 0;
d6b0e80f
AC
2425 }
2426
3d799a95
DJ
2427 if (event == PTRACE_EVENT_EXEC)
2428 {
a75724bc
PA
2429 if (debug_linux_nat)
2430 fprintf_unfiltered (gdb_stdlog,
2431 "LHEW: Got exec event from LWP %ld\n",
2432 GET_LWP (lp->ptid));
2433
3d799a95
DJ
2434 ourstatus->kind = TARGET_WAITKIND_EXECD;
2435 ourstatus->value.execd_pathname
6d8fd2b7 2436 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2437
6c95b8df
PA
2438 return 0;
2439 }
2440
2441 if (event == PTRACE_EVENT_VFORK_DONE)
2442 {
2443 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2444 {
6c95b8df 2445 if (debug_linux_nat)
3e43a32a
MS
2446 fprintf_unfiltered (gdb_stdlog,
2447 "LHEW: Got expected PTRACE_EVENT_"
2448 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2449 GET_LWP (lp->ptid));
3d799a95 2450
6c95b8df
PA
2451 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2452 return 0;
3d799a95
DJ
2453 }
2454
6c95b8df 2455 if (debug_linux_nat)
3e43a32a
MS
2456 fprintf_unfiltered (gdb_stdlog,
2457 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2458 "from LWP %ld: resuming\n",
6c95b8df
PA
2459 GET_LWP (lp->ptid));
2460 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2461 return 1;
3d799a95
DJ
2462 }
2463
2464 internal_error (__FILE__, __LINE__,
2465 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2466}
2467
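/* Illustrative sketch, not part of linux-nat.c: recognizing a ptrace
   extended event and fetching its payload, as linux_handle_extended_wait
   does above.  With the PTRACE_O_TRACE* options enabled, the event code
   is packed above bit 16 of the wait status, and PTRACE_GETEVENTMSG
   yields the new thread's id for fork/vfork/clone events.
   demo_new_lwp_of_clone is a hypothetical helper, not a GDB function.  */

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static long
demo_new_lwp_of_clone (pid_t pid, int status)
{
  unsigned long new_pid = 0;
  int event = status >> 16;

  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && event == PTRACE_EVENT_CLONE)
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

  return (long) new_pid;
}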
432b4d03
JK
2468/* Return non-zero if LWP is a zombie. */
2469
2470static int
2471linux_lwp_is_zombie (long lwp)
2472{
2473 char buffer[MAXPATHLEN];
2474 FILE *procfile;
ea23808b
PA
2475 int retval;
2476 int have_state;
432b4d03 2477
07e78767 2478 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
432b4d03
JK
2479 procfile = fopen (buffer, "r");
2480 if (procfile == NULL)
2481 {
2482 warning (_("unable to open /proc file '%s'"), buffer);
2483 return 0;
2484 }
ea23808b
PA
2485
2486 have_state = 0;
432b4d03 2487 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
ea23808b 2488 if (strncmp (buffer, "State:", 6) == 0)
432b4d03 2489 {
ea23808b 2490 have_state = 1;
432b4d03
JK
2491 break;
2492 }
ea23808b
PA
2493 retval = (have_state
2494 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
432b4d03 2495 fclose (procfile);
432b4d03
JK
2496 return retval;
2497}
2498
d6b0e80f
AC
2499/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2500 exited. */
2501
2502static int
2503wait_lwp (struct lwp_info *lp)
2504{
2505 pid_t pid;
432b4d03 2506 int status = 0;
d6b0e80f 2507 int thread_dead = 0;
432b4d03 2508 sigset_t prev_mask;
d6b0e80f
AC
2509
2510 gdb_assert (!lp->stopped);
2511 gdb_assert (lp->status == 0);
2512
432b4d03
JK
2513 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2514 block_child_signals (&prev_mask);
2515
2516 for (;;)
d6b0e80f 2517 {
432b4d03
JK
2518 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2519 was right and we should just call sigsuspend. */
2520
2521 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
d6b0e80f 2522 if (pid == -1 && errno == ECHILD)
432b4d03 2523 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2524 if (pid == -1 && errno == ECHILD)
2525 {
2526 /* The thread has previously exited. We need to delete it
2527 now because, for some vendor 2.4 kernels with NPTL
2528 support backported, there won't be an exit event unless
2529 it is the main thread. 2.6 kernels will report an exit
2530 event for each thread that exits, as expected. */
2531 thread_dead = 1;
2532 if (debug_linux_nat)
2533 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2534 target_pid_to_str (lp->ptid));
2535 }
432b4d03
JK
2536 if (pid != 0)
2537 break;
2538
2539 /* Bugs 10970, 12702.
2540 Thread group leader may have exited in which case we'll lock up in
2541 waitpid if there are other threads, even if they are all zombies too.
2542 Basically, we're not supposed to use waitpid this way.
2543 __WCLONE is not applicable for the leader so we can't use that.
2544 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2545 process; it gets ESRCH both for the zombie and for running processes.
2546
2547 As a workaround, check if we're waiting for the thread group leader and
2548 if it's a zombie, and avoid calling waitpid if it is.
2549
2550 This is racy: what if the tgl becomes a zombie right after we check?
2551 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
2552 blocking waitpid, but the linux_lwp_is_zombie check is safe this way. */
2553
2554 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2555 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
d6b0e80f 2556 {
d6b0e80f
AC
2557 thread_dead = 1;
2558 if (debug_linux_nat)
432b4d03
JK
2559 fprintf_unfiltered (gdb_stdlog,
2560 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2561 target_pid_to_str (lp->ptid));
432b4d03 2562 break;
d6b0e80f 2563 }
432b4d03
JK
2564
2565 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2566 get invoked even though our caller intentionally blocked them with
2567 block_child_signals. Only the loop in linux_nat_wait_1 is sensitive
2568 to this, and there my_waitpid gets called again before we reach
2569 sigsuspend, so we can safely let the handlers
2570 get executed here. */
2571
2572 sigsuspend (&suspend_mask);
2573 }
2574
2575 restore_child_signals_mask (&prev_mask);
2576
d6b0e80f
AC
2577 if (!thread_dead)
2578 {
2579 gdb_assert (pid == GET_LWP (lp->ptid));
2580
2581 if (debug_linux_nat)
2582 {
2583 fprintf_unfiltered (gdb_stdlog,
2584 "WL: waitpid %s received %s\n",
2585 target_pid_to_str (lp->ptid),
2586 status_to_str (status));
2587 }
d6b0e80f 2588
a9f4bb21
PA
2589 /* Check if the thread has exited. */
2590 if (WIFEXITED (status) || WIFSIGNALED (status))
2591 {
2592 thread_dead = 1;
2593 if (debug_linux_nat)
2594 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2595 target_pid_to_str (lp->ptid));
2596 }
d6b0e80f
AC
2597 }
2598
2599 if (thread_dead)
2600 {
e26af52f 2601 exit_lwp (lp);
d6b0e80f
AC
2602 return 0;
2603 }
2604
2605 gdb_assert (WIFSTOPPED (status));
2606
ca2163eb
PA
2607 /* Handle GNU/Linux's syscall SIGTRAPs. */
2608 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2609 {
2610 /* No longer need the sysgood bit. The ptrace event ends up
2611 recorded in lp->waitstatus if we care for it. We can carry
2612 on handling the event like a regular SIGTRAP from here
2613 on. */
2614 status = W_STOPCODE (SIGTRAP);
2615 if (linux_handle_syscall_trap (lp, 1))
2616 return wait_lwp (lp);
2617 }
2618
d6b0e80f
AC
2619 /* Handle GNU/Linux's extended waitstatus for trace events. */
2620 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2621 {
2622 if (debug_linux_nat)
2623 fprintf_unfiltered (gdb_stdlog,
2624 "WL: Handling extended status 0x%06x\n",
2625 status);
3d799a95 2626 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2627 return wait_lwp (lp);
2628 }
2629
2630 return status;
2631}
2632
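/* Illustrative sketch, not part of linux-nat.c: the WNOHANG +
   sigsuspend pattern that wait_lwp uses above.  The caller blocks
   SIGCHLD, polls with WNOHANG, and sleeps in sigsuspend (whose mask
   leaves SIGCHLD unblocked) until a SIGCHLD arrives, so it never
   blocks inside waitpid on an LWP that may never report anything.
   demo_wait_nohang is a hypothetical helper, not a GDB function.  */

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static int
demo_wait_nohang (pid_t lwpid, const sigset_t *suspend_mask)
{
  int status = 0;

  for (;;)
    {
      pid_t ret = waitpid (lwpid, &status, WNOHANG);

      if (ret == lwpid)
	break;			/* Got a status for LWPID.  */
      /* Nothing yet; SUSPEND_MASK is the caller's mask with SIGCHLD
	 removed, so this returns on the next SIGCHLD.  */
      sigsuspend (suspend_mask);
    }

  return status;
}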
9f0bdab8
DJ
2633/* Save the most recent siginfo for LP. This is currently only called
2634 for SIGTRAP; some ports use the si_addr field for
2635 target_stopped_data_address. In the future, it may also be used to
2636 restore the siginfo of requeued signals. */
2637
2638static void
2639save_siginfo (struct lwp_info *lp)
2640{
2641 errno = 0;
2642 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2643 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2644
2645 if (errno != 0)
2646 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2647}
2648
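/* Illustrative sketch, not part of linux-nat.c: fetching the siginfo
   of a stopped tracee, as save_siginfo does above.  For hardware
   watchpoint traps, si_addr holds the faulting data address on
   several targets, which is what target_stopped_data_address wants.
   demo_trap_address is a hypothetical helper, not a GDB function.  */

#include <signal.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void *
demo_trap_address (pid_t lwpid)
{
  siginfo_t si;

  memset (&si, 0, sizeof (si));
  if (ptrace (PTRACE_GETSIGINFO, lwpid, (void *) 0, &si) != 0)
    return NULL;
  return si.si_addr;
}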
d6b0e80f
AC
2649/* Send a SIGSTOP to LP. */
2650
2651static int
2652stop_callback (struct lwp_info *lp, void *data)
2653{
2654 if (!lp->stopped && !lp->signalled)
2655 {
2656 int ret;
2657
2658 if (debug_linux_nat)
2659 {
2660 fprintf_unfiltered (gdb_stdlog,
2661 "SC: kill %s **<SIGSTOP>**\n",
2662 target_pid_to_str (lp->ptid));
2663 }
2664 errno = 0;
2665 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2666 if (debug_linux_nat)
2667 {
2668 fprintf_unfiltered (gdb_stdlog,
2669 "SC: lwp kill %d %s\n",
2670 ret,
2671 errno ? safe_strerror (errno) : "ERRNO-OK");
2672 }
2673
2674 lp->signalled = 1;
2675 gdb_assert (lp->status == 0);
2676 }
2677
2678 return 0;
2679}
2680
7b50312a
PA
2681/* Request a stop on LWP. */
2682
2683void
2684linux_stop_lwp (struct lwp_info *lwp)
2685{
2686 stop_callback (lwp, NULL);
2687}
2688
57380f4e 2689/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2690
2691static int
57380f4e
DJ
2692linux_nat_has_pending_sigint (int pid)
2693{
2694 sigset_t pending, blocked, ignored;
57380f4e
DJ
2695
2696 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2697
2698 if (sigismember (&pending, SIGINT)
2699 && !sigismember (&ignored, SIGINT))
2700 return 1;
2701
2702 return 0;
2703}
2704
2705/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2706
2707static int
2708set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2709{
57380f4e
DJ
2710 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2711 flag to consume the next one. */
2712 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2713 && WSTOPSIG (lp->status) == SIGINT)
2714 lp->status = 0;
2715 else
2716 lp->ignore_sigint = 1;
2717
2718 return 0;
2719}
2720
2721/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2722 This function is called after we know the LWP has stopped; if the LWP
2723 stopped before the expected SIGINT was delivered, then it will never have
2724 arrived. Also, if the signal was delivered to a shared queue and consumed
2725 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2726
57380f4e
DJ
2727static void
2728maybe_clear_ignore_sigint (struct lwp_info *lp)
2729{
2730 if (!lp->ignore_sigint)
2731 return;
2732
2733 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2734 {
2735 if (debug_linux_nat)
2736 fprintf_unfiltered (gdb_stdlog,
2737 "MCIS: Clearing bogus flag for %s\n",
2738 target_pid_to_str (lp->ptid));
2739 lp->ignore_sigint = 0;
2740 }
2741}
2742
ebec9a0f
PA
2743/* Fetch the possible triggered data watchpoint info and store it in
2744 LP.
2745
2746 On some archs, like x86, that use debug registers to set
2747 watchpoints, it's possible that the way to know which watched
2748 address trapped is to check the register that is used to select
2749 which address to watch. Problem is, between setting the watchpoint
2750 and reading back which data address trapped, the user may change
2751 the set of watchpoints, and, as a consequence, GDB changes the
2752 debug registers in the inferior. To avoid reading back a stale
2753 stopped-data-address when that happens, we cache in LP the fact
2754 that a watchpoint trapped, and the corresponding data address, as
2755 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2756 registers meanwhile, we have the cached data we can rely on. */
2757
2758static void
2759save_sigtrap (struct lwp_info *lp)
2760{
2761 struct cleanup *old_chain;
2762
2763 if (linux_ops->to_stopped_by_watchpoint == NULL)
2764 {
2765 lp->stopped_by_watchpoint = 0;
2766 return;
2767 }
2768
2769 old_chain = save_inferior_ptid ();
2770 inferior_ptid = lp->ptid;
2771
2772 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2773
2774 if (lp->stopped_by_watchpoint)
2775 {
2776 if (linux_ops->to_stopped_data_address != NULL)
2777 lp->stopped_data_address_p =
2778 linux_ops->to_stopped_data_address (&current_target,
2779 &lp->stopped_data_address);
2780 else
2781 lp->stopped_data_address_p = 0;
2782 }
2783
2784 do_cleanups (old_chain);
2785}
2786
2787/* See save_sigtrap. */
2788
2789static int
2790linux_nat_stopped_by_watchpoint (void)
2791{
2792 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2793
2794 gdb_assert (lp != NULL);
2795
2796 return lp->stopped_by_watchpoint;
2797}
2798
2799static int
2800linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2801{
2802 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2803
2804 gdb_assert (lp != NULL);
2805
2806 *addr_p = lp->stopped_data_address;
2807
2808 return lp->stopped_data_address_p;
2809}
2810
26ab7092
JK
2811/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2812
2813static int
2814sigtrap_is_event (int status)
2815{
2816 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2817}
2818
2819/* SIGTRAP-like events recognizer. */
2820
2821static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2822
00390b84
JK
2823/* Check for SIGTRAP-like events in LP. */
2824
2825static int
2826linux_nat_lp_status_is_event (struct lwp_info *lp)
2827{
2828 /* We check for lp->waitstatus in addition to lp->status, because we can
2829 have pending process exits recorded in lp->status
2830 and W_EXITCODE(0,0) == 0. We should probably have an additional
2831 lp->status_p flag. */
2832
2833 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2834 && linux_nat_status_is_event (lp->status));
2835}
2836
26ab7092
JK
2837/* Set alternative SIGTRAP-like events recognizer. If
2838 breakpoint_inserted_here_p reports a breakpoint at the stop PC, then
2839 gdbarch_decr_pc_after_break will be applied. */
2840
2841void
2842linux_nat_set_status_is_event (struct target_ops *t,
2843 int (*status_is_event) (int status))
2844{
2845 linux_nat_status_is_event = status_is_event;
2846}
2847
57380f4e
DJ
2848/* Wait until LP is stopped. */
2849
2850static int
2851stop_wait_callback (struct lwp_info *lp, void *data)
2852{
6c95b8df
PA
2853 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2854
2855 /* If this is a vfork parent, bail out, it is not going to report
2856 any SIGSTOP until the vfork is done with. */
2857 if (inf->vfork_child != NULL)
2858 return 0;
2859
d6b0e80f
AC
2860 if (!lp->stopped)
2861 {
2862 int status;
2863
2864 status = wait_lwp (lp);
2865 if (status == 0)
2866 return 0;
2867
57380f4e
DJ
2868 if (lp->ignore_sigint && WIFSTOPPED (status)
2869 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2870 {
57380f4e 2871 lp->ignore_sigint = 0;
d6b0e80f
AC
2872
2873 errno = 0;
2874 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2875 if (debug_linux_nat)
2876 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2877 "PTRACE_CONT %s, 0, 0 (%s) "
2878 "(discarding SIGINT)\n",
d6b0e80f
AC
2879 target_pid_to_str (lp->ptid),
2880 errno ? safe_strerror (errno) : "OK");
2881
57380f4e 2882 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2883 }
2884
57380f4e
DJ
2885 maybe_clear_ignore_sigint (lp);
2886
d6b0e80f
AC
2887 if (WSTOPSIG (status) != SIGSTOP)
2888 {
26ab7092 2889 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2890 {
2891 /* If a LWP other than the LWP that we're reporting an
2892 event for has hit a GDB breakpoint (as opposed to
2893 some random trap signal), then just arrange for it to
2894 hit it again later. We don't keep the SIGTRAP status
2895 and don't forward the SIGTRAP signal to the LWP. We
2896 will handle the current event, eventually we will
2897 resume all LWPs, and this one will get its breakpoint
2898 trap again.
2899
2900 If we do not do this, then we run the risk that the
2901 user will delete or disable the breakpoint, but the
2902 thread will have already tripped on it. */
2903
9f0bdab8
DJ
2904 /* Save the trap's siginfo in case we need it later. */
2905 save_siginfo (lp);
2906
ebec9a0f
PA
2907 save_sigtrap (lp);
2908
1777feb0 2909 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2910 errno = 0;
2911 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2912 if (debug_linux_nat)
2913 {
2914 fprintf_unfiltered (gdb_stdlog,
2915 "PTRACE_CONT %s, 0, 0 (%s)\n",
2916 target_pid_to_str (lp->ptid),
2917 errno ? safe_strerror (errno) : "OK");
2918
2919 fprintf_unfiltered (gdb_stdlog,
2920 "SWC: Candidate SIGTRAP event in %s\n",
2921 target_pid_to_str (lp->ptid));
2922 }
710151dd 2923 /* Hold this event/waitstatus while we check to see if
1777feb0 2924 there are any more (we still want to get that SIGSTOP). */
57380f4e 2925 stop_wait_callback (lp, NULL);
710151dd 2926
7feb7d06
PA
2927 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2928 there's another event, throw it back into the
1777feb0 2929 queue. */
7feb7d06 2930 if (lp->status)
710151dd 2931 {
7feb7d06
PA
2932 if (debug_linux_nat)
2933 fprintf_unfiltered (gdb_stdlog,
2934 "SWC: kill %s, %s\n",
2935 target_pid_to_str (lp->ptid),
2936 status_to_str ((int) status));
2937 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2938 }
7feb7d06 2939
1777feb0 2940 /* Save the sigtrap event. */
7feb7d06 2941 lp->status = status;
d6b0e80f
AC
2942 return 0;
2943 }
2944 else
2945 {
2946 /* The thread was stopped with a signal other than
1777feb0 2947 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2948
2949 if (debug_linux_nat)
2950 {
2951 fprintf_unfiltered (gdb_stdlog,
2952 "SWC: Pending event %s in %s\n",
2953 status_to_str ((int) status),
2954 target_pid_to_str (lp->ptid));
2955 }
1777feb0 2956 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2957 errno = 0;
2958 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2959 if (debug_linux_nat)
2960 fprintf_unfiltered (gdb_stdlog,
2961 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2962 target_pid_to_str (lp->ptid),
2963 errno ? safe_strerror (errno) : "OK");
2964
2965 /* Hold this event/waitstatus while we check to see if
1777feb0 2966 there are any more (we still want to get that SIGSTOP). */
57380f4e 2967 stop_wait_callback (lp, NULL);
710151dd
PA
2968
2969 /* If the lp->status field is still empty, use it to
2970 hold this event. If not, then this event must be
2971 returned to the event queue of the LWP. */
7feb7d06 2972 if (lp->status)
d6b0e80f
AC
2973 {
2974 if (debug_linux_nat)
2975 {
2976 fprintf_unfiltered (gdb_stdlog,
2977 "SWC: kill %s, %s\n",
2978 target_pid_to_str (lp->ptid),
2979 status_to_str ((int) status));
2980 }
2981 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2982 }
710151dd
PA
2983 else
2984 lp->status = status;
d6b0e80f
AC
2985 return 0;
2986 }
2987 }
2988 else
2989 {
2990 /* We caught the SIGSTOP that we intended to catch, so
2991 there's no SIGSTOP pending. */
2992 lp->stopped = 1;
2993 lp->signalled = 0;
2994 }
2995 }
2996
2997 return 0;
2998}
2999
d6b0e80f
AC
3000/* Return non-zero if LP has a wait status pending. */
3001
3002static int
3003status_callback (struct lwp_info *lp, void *data)
3004{
3005 /* Only report a pending wait status if we pretend that this has
3006 indeed been resumed. */
ca2163eb
PA
3007 if (!lp->resumed)
3008 return 0;
3009
3010 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3011 {
3012 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 3013 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
3014 0', so a clean process exit cannot be stored pending in
3015 lp->status; it would be indistinguishable from
3016 no-pending-status. */
3017 return 1;
3018 }
3019
3020 if (lp->status != 0)
3021 return 1;
3022
3023 return 0;
d6b0e80f
AC
3024}
3025
3026/* Return non-zero if LP isn't stopped. */
3027
3028static int
3029running_callback (struct lwp_info *lp, void *data)
3030{
25289eb2
PA
3031 return (!lp->stopped
3032 || ((lp->status != 0
3033 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3034 && lp->resumed));
d6b0e80f
AC
3035}
3036
3037/* Count the LWP's that have had events. */
3038
3039static int
3040count_events_callback (struct lwp_info *lp, void *data)
3041{
3042 int *count = data;
3043
3044 gdb_assert (count != NULL);
3045
e09490f1 3046 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 3047 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
3048 (*count)++;
3049
3050 return 0;
3051}
3052
3053/* Select the LWP (if any) that is currently being single-stepped. */
3054
3055static int
3056select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3057{
25289eb2
PA
3058 if (lp->last_resume_kind == resume_step
3059 && lp->status != 0)
d6b0e80f
AC
3060 return 1;
3061 else
3062 return 0;
3063}
3064
3065/* Select the Nth LWP that has had a SIGTRAP event. */
3066
3067static int
3068select_event_lwp_callback (struct lwp_info *lp, void *data)
3069{
3070 int *selector = data;
3071
3072 gdb_assert (selector != NULL);
3073
1777feb0 3074 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 3075 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
3076 if ((*selector)-- == 0)
3077 return 1;
3078
3079 return 0;
3080}
3081
710151dd
PA
3082static int
3083cancel_breakpoint (struct lwp_info *lp)
3084{
3085 /* Arrange for a breakpoint to be hit again later. We don't keep
3086 the SIGTRAP status and don't forward the SIGTRAP signal to the
3087 LWP. We will handle the current event, eventually we will resume
3088 this LWP, and this breakpoint will trap again.
3089
3090 If we do not do this, then we run the risk that the user will
3091 delete or disable the breakpoint, but the LWP will have already
3092 tripped on it. */
3093
515630c5
UW
3094 struct regcache *regcache = get_thread_regcache (lp->ptid);
3095 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3096 CORE_ADDR pc;
3097
3098 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 3099 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
3100 {
3101 if (debug_linux_nat)
3102 fprintf_unfiltered (gdb_stdlog,
3103 "CB: Push back breakpoint for %s\n",
3104 target_pid_to_str (lp->ptid));
3105
3106 /* Back up the PC if necessary. */
515630c5
UW
3107 if (gdbarch_decr_pc_after_break (gdbarch))
3108 regcache_write_pc (regcache, pc);
3109
710151dd
PA
3110 return 1;
3111 }
3112 return 0;
3113}
3114
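/* Illustrative sketch, not part of linux-nat.c: the "push the PC back
   over the breakpoint" idea that cancel_breakpoint implements above
   through the regcache and gdbarch_decr_pc_after_break.  On x86 the
   one-byte int3 has already executed when the SIGTRAP is reported, so
   the tracer rewinds the PC by one byte before letting the thread hit
   the breakpoint again.  Raw-ptrace, x86-64-only equivalent;
   demo_rewind_pc_after_break is a hypothetical helper, not a GDB
   function.  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static int
demo_rewind_pc_after_break (pid_t lwpid)
{
#ifdef __x86_64__
  struct user_regs_struct regs;

  if (ptrace (PTRACE_GETREGS, lwpid, 0, &regs) != 0)
    return -1;
  regs.rip -= 1;		/* int3 is a single byte.  */
  return ptrace (PTRACE_SETREGS, lwpid, 0, &regs) == 0 ? 0 : -1;
#else
  (void) lwpid;
  return -1;
#endif
}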
d6b0e80f
AC
3115static int
3116cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3117{
3118 struct lwp_info *event_lp = data;
3119
3120 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3121 if (lp == event_lp)
3122 return 0;
3123
3124 /* If a LWP other than the LWP that we're reporting an event for has
3125 hit a GDB breakpoint (as opposed to some random trap signal),
3126 then just arrange for it to hit it again later. We don't keep
3127 the SIGTRAP status and don't forward the SIGTRAP signal to the
3128 LWP. We will handle the current event, eventually we will resume
3129 all LWPs, and this one will get its breakpoint trap again.
3130
3131 If we do not do this, then we run the risk that the user will
3132 delete or disable the breakpoint, but the LWP will have already
3133 tripped on it. */
3134
00390b84 3135 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
3136 && cancel_breakpoint (lp))
3137 /* Throw away the SIGTRAP. */
3138 lp->status = 0;
d6b0e80f
AC
3139
3140 return 0;
3141}
3142
3143/* Select one LWP out of those that have events pending. */
3144
3145static void
d90e17a7 3146select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
3147{
3148 int num_events = 0;
3149 int random_selector;
3150 struct lwp_info *event_lp;
3151
ac264b3b 3152 /* Record the wait status for the original LWP. */
d6b0e80f
AC
3153 (*orig_lp)->status = *status;
3154
3155 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
3156 event_lp = iterate_over_lwps (filter,
3157 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
3158 if (event_lp != NULL)
3159 {
3160 if (debug_linux_nat)
3161 fprintf_unfiltered (gdb_stdlog,
3162 "SEL: Select single-step %s\n",
3163 target_pid_to_str (event_lp->ptid));
3164 }
3165 else
3166 {
3167 /* No single-stepping LWP. Select one at random, out of those
3168 which have had SIGTRAP events. */
3169
3170 /* First see how many SIGTRAP events we have. */
d90e17a7 3171 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
3172
3173 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3174 random_selector = (int)
3175 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3176
3177 if (debug_linux_nat && num_events > 1)
3178 fprintf_unfiltered (gdb_stdlog,
3179 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3180 num_events, random_selector);
3181
d90e17a7
PA
3182 event_lp = iterate_over_lwps (filter,
3183 select_event_lwp_callback,
d6b0e80f
AC
3184 &random_selector);
3185 }
3186
3187 if (event_lp != NULL)
3188 {
3189 /* Switch the event LWP. */
3190 *orig_lp = event_lp;
3191 *status = event_lp->status;
3192 }
3193
3194 /* Flush the wait status for the event LWP. */
3195 (*orig_lp)->status = 0;
3196}
3197
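/* Illustrative sketch, not part of linux-nat.c: the scaled-rand()
   pick used by select_event_lwp above.  It maps rand() onto
   0 .. num_events-1 roughly uniformly, which is all the event-LWP
   selection needs.  demo_random_selector is a hypothetical helper,
   not a GDB function.  */

#include <stdlib.h>

static int
demo_random_selector (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}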
3198/* Return non-zero if LP has been resumed. */
3199
3200static int
3201resumed_callback (struct lwp_info *lp, void *data)
3202{
3203 return lp->resumed;
3204}
3205
12d9289a
PA
3206/* Stop an active thread, verify it still exists, then resume it. If
3207 the thread ends up with a pending status, then it is not resumed,
3208 and *DATA (really a pointer to int) is set. */
d6b0e80f
AC
3209
3210static int
3211stop_and_resume_callback (struct lwp_info *lp, void *data)
3212{
12d9289a
PA
3213 int *new_pending_p = data;
3214
25289eb2 3215 if (!lp->stopped)
d6b0e80f 3216 {
25289eb2
PA
3217 ptid_t ptid = lp->ptid;
3218
d6b0e80f
AC
3219 stop_callback (lp, NULL);
3220 stop_wait_callback (lp, NULL);
25289eb2
PA
3221
3222 /* Resume if the lwp still exists, and the core wanted it
3223 running. */
12d9289a
PA
3224 lp = find_lwp_pid (ptid);
3225 if (lp != NULL)
25289eb2 3226 {
12d9289a
PA
3227 if (lp->last_resume_kind == resume_stop
3228 && lp->status == 0)
3229 {
3230 /* The core wanted the LWP to stop. Even if it stopped
3231 cleanly (with SIGSTOP), leave the event pending. */
3232 if (debug_linux_nat)
3233 fprintf_unfiltered (gdb_stdlog,
3234 "SARC: core wanted LWP %ld stopped "
3235 "(leaving SIGSTOP pending)\n",
3236 GET_LWP (lp->ptid));
3237 lp->status = W_STOPCODE (SIGSTOP);
3238 }
3239
3240 if (lp->status == 0)
3241 {
3242 if (debug_linux_nat)
3243 fprintf_unfiltered (gdb_stdlog,
3244 "SARC: re-resuming LWP %ld\n",
3245 GET_LWP (lp->ptid));
3246 resume_lwp (lp, lp->step);
3247 }
3248 else
3249 {
3250 if (debug_linux_nat)
3251 fprintf_unfiltered (gdb_stdlog,
3252 "SARC: not re-resuming LWP %ld "
3253 "(has pending)\n",
3254 GET_LWP (lp->ptid));
3255 if (new_pending_p)
3256 *new_pending_p = 1;
3257 }
25289eb2 3258 }
d6b0e80f
AC
3259 }
3260 return 0;
3261}
3262
02f3fc28 3263/* Check if we should go on and pass this event to common code.
12d9289a
PA
3264 Return the affected lwp if we are, or NULL otherwise. If we stop
3265 all lwps temporarily, we may end up with new pending events in some
3266 other lwp. In that case set *NEW_PENDING_P to true. */
3267
02f3fc28 3268static struct lwp_info *
0e5bf2a8 3269linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
02f3fc28
PA
3270{
3271 struct lwp_info *lp;
3272
12d9289a
PA
3273 *new_pending_p = 0;
3274
02f3fc28
PA
3275 lp = find_lwp_pid (pid_to_ptid (lwpid));
3276
3277 /* Check for stop events reported by a process we didn't already
3278 know about - anything not already in our LWP list.
3279
3280 If we're expecting to receive stopped processes after
3281 fork, vfork, and clone events, then we'll just add the
3282 new one to our list and go back to waiting for the event
3283 to be reported - the stopped process might be returned
0e5bf2a8
PA
3284 from waitpid before or after the event is.
3285
3286 But note the case of a non-leader thread exec'ing after the
3287 leader having exited, and gone from our lists. The non-leader
3288 thread changes its tid to the tgid. */
3289
3290 if (WIFSTOPPED (status) && lp == NULL
3291 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3292 {
3293 /* A multi-thread exec after we had seen the leader exiting. */
3294 if (debug_linux_nat)
3295 fprintf_unfiltered (gdb_stdlog,
3296 "LLW: Re-adding thread group leader LWP %d.\n",
3297 lwpid);
3298
3299 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3300 lp->stopped = 1;
3301 lp->resumed = 1;
3302 add_thread (lp->ptid);
3303 }
3304
02f3fc28
PA
3305 if (WIFSTOPPED (status) && !lp)
3306 {
84636d28 3307 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3308 return NULL;
3309 }
3310
3311 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3312 our list, i.e. not part of the current process. This can happen
fd62cb89 3313 if we detach from a program we originally forked and then it
02f3fc28
PA
3314 exits. */
3315 if (!WIFSTOPPED (status) && !lp)
3316 return NULL;
3317
ca2163eb
PA
3318 /* Handle GNU/Linux's syscall SIGTRAPs. */
3319 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3320 {
3321 /* No longer need the sysgood bit. The ptrace event ends up
3322 recorded in lp->waitstatus if we care for it. We can carry
3323 on handling the event like a regular SIGTRAP from here
3324 on. */
3325 status = W_STOPCODE (SIGTRAP);
3326 if (linux_handle_syscall_trap (lp, 0))
3327 return NULL;
3328 }
02f3fc28 3329
ca2163eb
PA
3330 /* Handle GNU/Linux's extended waitstatus for trace events. */
3331 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3332 {
3333 if (debug_linux_nat)
3334 fprintf_unfiltered (gdb_stdlog,
3335 "LLW: Handling extended status 0x%06x\n",
3336 status);
3337 if (linux_handle_extended_wait (lp, status, 0))
3338 return NULL;
3339 }
3340
26ab7092 3341 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3342 {
3343 /* Save the trap's siginfo in case we need it later. */
3344 save_siginfo (lp);
3345
3346 save_sigtrap (lp);
3347 }
ca2163eb 3348
02f3fc28 3349 /* Check if the thread has exited. */
d90e17a7
PA
3350 if ((WIFEXITED (status) || WIFSIGNALED (status))
3351 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3352 {
9db03742
JB
3353 /* If this is the main thread, we must stop all threads and verify
3354 if they are still alive. This is because in the nptl thread model
3355 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3356 other than the main thread. We only get the main thread exit
3357 signal once all child threads have already exited. If we
3358 stop all the threads and use the stop_wait_callback to check
3359 if they have exited we can determine whether this signal
3360 should be ignored or whether it means the end of the debugged
3361 application, regardless of which threading model is being
5d3b6af6 3362 used. */
02f3fc28
PA
3363 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3364 {
3365 lp->stopped = 1;
d90e17a7 3366 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
12d9289a 3367 stop_and_resume_callback, new_pending_p);
02f3fc28
PA
3368 }
3369
3370 if (debug_linux_nat)
3371 fprintf_unfiltered (gdb_stdlog,
3372 "LLW: %s exited.\n",
3373 target_pid_to_str (lp->ptid));
3374
d90e17a7 3375 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3376 {
3377 /* If there is at least one more LWP, then the exit signal
3378 was not the end of the debugged application and should be
3379 ignored. */
3380 exit_lwp (lp);
3381 return NULL;
3382 }
02f3fc28
PA
3383 }
3384
3385 /* Check if the current LWP has previously exited. In the nptl
3386 thread model, LWPs other than the main thread do not issue
3387 signals when they exit so we must check whenever the thread has
3388 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3389 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3390 {
d90e17a7
PA
3391 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3392
02f3fc28
PA
3393 if (debug_linux_nat)
3394 fprintf_unfiltered (gdb_stdlog,
3395 "LLW: %s exited.\n",
3396 target_pid_to_str (lp->ptid));
3397
3398 exit_lwp (lp);
3399
3400 /* Make sure there is at least one thread running. */
d90e17a7 3401 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3402
3403 /* Discard the event. */
3404 return NULL;
3405 }
3406
3407 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3408 an attempt to stop an LWP. */
3409 if (lp->signalled
3410 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3411 {
3412 if (debug_linux_nat)
3413 fprintf_unfiltered (gdb_stdlog,
3414 "LLW: Delayed SIGSTOP caught for %s.\n",
3415 target_pid_to_str (lp->ptid));
3416
02f3fc28
PA
3417 lp->signalled = 0;
3418
25289eb2
PA
3419 if (lp->last_resume_kind != resume_stop)
3420 {
3421 /* This is a delayed SIGSTOP. */
02f3fc28 3422
25289eb2
PA
3423 registers_changed ();
3424
7b50312a
PA
3425 if (linux_nat_prepare_to_resume != NULL)
3426 linux_nat_prepare_to_resume (lp);
25289eb2 3427 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28 3428 lp->step, TARGET_SIGNAL_0);
25289eb2
PA
3429 if (debug_linux_nat)
3430 fprintf_unfiltered (gdb_stdlog,
3431 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3432 lp->step ?
3433 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3434 target_pid_to_str (lp->ptid));
02f3fc28 3435
25289eb2
PA
3436 lp->stopped = 0;
3437 gdb_assert (lp->resumed);
02f3fc28 3438
25289eb2
PA
3439 /* Discard the event. */
3440 return NULL;
3441 }
02f3fc28
PA
3442 }
3443
57380f4e
DJ
3444 /* Make sure we don't report a SIGINT that we have already displayed
3445 for another thread. */
3446 if (lp->ignore_sigint
3447 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3448 {
3449 if (debug_linux_nat)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "LLW: Delayed SIGINT caught for %s.\n",
3452 target_pid_to_str (lp->ptid));
3453
3454 /* This is a delayed SIGINT. */
3455 lp->ignore_sigint = 0;
3456
3457 registers_changed ();
7b50312a
PA
3458 if (linux_nat_prepare_to_resume != NULL)
3459 linux_nat_prepare_to_resume (lp);
28439f5e 3460 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3461 lp->step, TARGET_SIGNAL_0);
3462 if (debug_linux_nat)
3463 fprintf_unfiltered (gdb_stdlog,
3464 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3465 lp->step ?
3466 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3467 target_pid_to_str (lp->ptid));
3468
3469 lp->stopped = 0;
3470 gdb_assert (lp->resumed);
3471
3472 /* Discard the event. */
3473 return NULL;
3474 }
3475
02f3fc28
PA
3476 /* An interesting event. */
3477 gdb_assert (lp);
ca2163eb 3478 lp->status = status;
02f3fc28
PA
3479 return lp;
3480}
3481
0e5bf2a8
PA
3482/* Detect zombie thread group leaders, and "exit" them. We can't reap
3483 their exits until all other threads in the group have exited. */
3484
3485static void
3486check_zombie_leaders (void)
3487{
3488 struct inferior *inf;
3489
3490 ALL_INFERIORS (inf)
3491 {
3492 struct lwp_info *leader_lp;
3493
3494 if (inf->pid == 0)
3495 continue;
3496
3497 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3498 if (leader_lp != NULL
3499 /* Check if there are other threads in the group, as we may
3500 have raced with the inferior simply exiting. */
3501 && num_lwps (inf->pid) > 1
3502 && linux_lwp_is_zombie (inf->pid))
3503 {
3504 if (debug_linux_nat)
3505 fprintf_unfiltered (gdb_stdlog,
3506 "CZL: Thread group leader %d zombie "
3507 "(it exited, or another thread execd).\n",
3508 inf->pid);
3509
3510 /* A leader zombie can mean one of two things:
3511
3512 - It exited, and there's an exit status pending
3513 available, or only the leader exited (not the whole
3514 program). In the latter case, we can't waitpid the
3515 leader's exit status until all other threads are gone.
3516
3517 - There are 3 or more threads in the group, and a thread
3518 other than the leader exec'd. On an exec, the Linux
3519 kernel destroys all other threads (except the execing
3520 one) in the thread group, and resets the execing thread's
3521 tid to the tgid. No exit notification is sent for the
3522 execing thread -- from the ptracer's perspective, it
3523 appears as though the execing thread just vanishes.
3524 Until we reap all other threads except the leader and the
3525 execing thread, the leader will be zombie, and the
3526 execing thread will be in `D (disc sleep)'. As soon as
3527 all other threads are reaped, the execing thread changes
3528	 its tid to the tgid, and the previous (zombie) leader
3529	 vanishes, giving way to the "new" leader.  We could try
3530 distinguishing the exit and exec cases, by waiting once
3531 more, and seeing if something comes out, but it doesn't
3532 sound useful. The previous leader _does_ go away, and
3533 we'll re-add the new one once we see the exec event
3534 (which is just the same as what would happen if the
3535 previous leader did exit voluntarily before some other
3536 thread execs). */
3537
3538 if (debug_linux_nat)
3539 fprintf_unfiltered (gdb_stdlog,
3540 "CZL: Thread group leader %d vanished.\n",
3541 inf->pid);
3542 exit_lwp (leader_lp);
3543 }
3544 }
3545}
3546
d6b0e80f 3547static ptid_t
7feb7d06 3548linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3549 ptid_t ptid, struct target_waitstatus *ourstatus,
3550 int target_options)
d6b0e80f 3551{
7feb7d06 3552 static sigset_t prev_mask;
4b60df3d 3553 enum resume_kind last_resume_kind;
12d9289a 3554 struct lwp_info *lp;
12d9289a 3555 int status;
d6b0e80f 3556
01124a23 3557 if (debug_linux_nat)
b84876c2
PA
3558 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3559
f973ed9c
DJ
3560 /* The first time we get here after starting a new inferior, we may
3561 not have added it to the LWP list yet - this is the earliest
3562 moment at which we know its PID. */
d90e17a7 3563 if (ptid_is_pid (inferior_ptid))
f973ed9c 3564 {
27c9d204
PA
3565 /* Upgrade the main thread's ptid. */
3566 thread_change_ptid (inferior_ptid,
3567 BUILD_LWP (GET_PID (inferior_ptid),
3568 GET_PID (inferior_ptid)));
3569
f973ed9c
DJ
3570 lp = add_lwp (inferior_ptid);
3571 lp->resumed = 1;
3572 }
3573
7feb7d06
PA
3574 /* Make sure SIGCHLD is blocked. */
3575 block_child_signals (&prev_mask);
d6b0e80f
AC
3576
3577retry:
d90e17a7
PA
3578 lp = NULL;
3579 status = 0;
d6b0e80f
AC
3580
3581 /* First check if there is a LWP with a wait status pending. */
0e5bf2a8 3582 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d6b0e80f 3583 {
0e5bf2a8 3584 /* Any LWP in the PTID group that's been resumed will do. */
d90e17a7 3585 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3586 if (lp)
3587 {
ca2163eb 3588 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3589 fprintf_unfiltered (gdb_stdlog,
3590 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3591 status_to_str (lp->status),
d6b0e80f
AC
3592 target_pid_to_str (lp->ptid));
3593 }
d6b0e80f
AC
3594 }
3595 else if (is_lwp (ptid))
3596 {
3597 if (debug_linux_nat)
3598 fprintf_unfiltered (gdb_stdlog,
3599 "LLW: Waiting for specific LWP %s.\n",
3600 target_pid_to_str (ptid));
3601
3602 /* We have a specific LWP to check. */
3603 lp = find_lwp_pid (ptid);
3604 gdb_assert (lp);
d6b0e80f 3605
ca2163eb 3606 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3607 fprintf_unfiltered (gdb_stdlog,
3608 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3609 status_to_str (lp->status),
d6b0e80f
AC
3610 target_pid_to_str (lp->ptid));
3611
d90e17a7
PA
3612 /* We check for lp->waitstatus in addition to lp->status,
3613 because we can have pending process exits recorded in
3614 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3615 an additional lp->status_p flag. */
ca2163eb 3616 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3617 lp = NULL;
d6b0e80f
AC
3618 }
3619
25289eb2 3620 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
d6b0e80f
AC
3621 {
3622 /* A pending SIGSTOP may interfere with the normal stream of
3623 events. In a typical case where interference is a problem,
3624 we have a SIGSTOP signal pending for LWP A while
3625 single-stepping it, encounter an event in LWP B, and take the
3626 pending SIGSTOP while trying to stop LWP A. After processing
3627 the event in LWP B, LWP A is continued, and we'll never see
3628 the SIGTRAP associated with the last time we were
3629 single-stepping LWP A. */
3630
3631 /* Resume the thread. It should halt immediately returning the
3632 pending SIGSTOP. */
3633 registers_changed ();
7b50312a
PA
3634 if (linux_nat_prepare_to_resume != NULL)
3635 linux_nat_prepare_to_resume (lp);
28439f5e 3636 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3637 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3638 if (debug_linux_nat)
3639 fprintf_unfiltered (gdb_stdlog,
3640 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3641 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3642 target_pid_to_str (lp->ptid));
3643 lp->stopped = 0;
3644 gdb_assert (lp->resumed);
3645
ca2163eb
PA
3646 /* Catch the pending SIGSTOP. */
3647 status = lp->status;
3648 lp->status = 0;
3649
d6b0e80f 3650 stop_wait_callback (lp, NULL);
ca2163eb
PA
3651
3652 /* If the lp->status field isn't empty, we caught another signal
3653 while flushing the SIGSTOP. Return it back to the event
3654 queue of the LWP, as we already have an event to handle. */
3655 if (lp->status)
3656 {
3657 if (debug_linux_nat)
3658 fprintf_unfiltered (gdb_stdlog,
3659 "LLW: kill %s, %s\n",
3660 target_pid_to_str (lp->ptid),
3661 status_to_str (lp->status));
3662 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3663 }
3664
3665 lp->status = status;
d6b0e80f
AC
3666 }
3667
b84876c2
PA
3668 if (!target_can_async_p ())
3669 {
3670 /* Causes SIGINT to be passed on to the attached process. */
3671 set_sigint_trap ();
b84876c2 3672 }
d6b0e80f 3673
0e5bf2a8 3674 /* But if we don't find a pending event, we'll have to wait. */
7feb7d06 3675
d90e17a7 3676 while (lp == NULL)
d6b0e80f
AC
3677 {
3678 pid_t lwpid;
3679
0e5bf2a8
PA
3680      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3681 quirks:
3682
3683 - If the thread group leader exits while other threads in the
3684 thread group still exist, waitpid(TGID, ...) hangs. That
3685 waitpid won't return an exit status until the other threads
3686	 in the group are reaped.
3687
3688 - When a non-leader thread execs, that thread just vanishes
3689 without reporting an exit (so we'd hang if we waited for it
3690 explicitly in that case). The exec event is reported to
3691 the TGID pid. */
3692
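      /* __WCLONE selects "clone" children only, i.e. threads that do
	 not deliver SIGCHLD to their parent when they terminate; the
	 plain waitpid call below picks up ordinary children.  Between
	 them, the two calls cover what __WALL does on newer kernels.  */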
3693 errno = 0;
3694 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3695 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3696 lwpid = my_waitpid (-1, &status, WNOHANG);
3697
3698 if (debug_linux_nat)
3699 fprintf_unfiltered (gdb_stdlog,
3700 "LNW: waitpid(-1, ...) returned %d, %s\n",
3701 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3702
d6b0e80f
AC
3703 if (lwpid > 0)
3704 {
12d9289a
PA
3705 /* If this is true, then we paused LWPs momentarily, and may
3706 now have pending events to handle. */
3707 int new_pending;
3708
d6b0e80f
AC
3709 if (debug_linux_nat)
3710 {
3711 fprintf_unfiltered (gdb_stdlog,
3712 "LLW: waitpid %ld received %s\n",
3713 (long) lwpid, status_to_str (status));
3714 }
3715
0e5bf2a8 3716 lp = linux_nat_filter_event (lwpid, status, &new_pending);
d90e17a7 3717
33355866
JK
3718 /* STATUS is now no longer valid, use LP->STATUS instead. */
3719 status = 0;
3720
0e5bf2a8 3721 if (lp && !ptid_match (lp->ptid, ptid))
d6b0e80f 3722 {
e3e9f5a2
PA
3723 gdb_assert (lp->resumed);
3724
d90e17a7 3725 if (debug_linux_nat)
3e43a32a
MS
3726 fprintf (stderr,
3727 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3728 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3729
ca2163eb 3730 if (WIFSTOPPED (lp->status))
d90e17a7 3731 {
ca2163eb 3732 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3733 {
e3e9f5a2
PA
3734 /* Cancel breakpoint hits. The breakpoint may
3735 be removed before we fetch events from this
3736 process to report to the core. It is best
3737 not to assume the moribund breakpoints
3738 heuristic always handles these cases --- it
3739 could be too many events go through to the
3740 core before this one is handled. All-stop
3741 always cancels breakpoint hits in all
3742 threads. */
3743 if (non_stop
00390b84 3744 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3745 && cancel_breakpoint (lp))
3746 {
3747 /* Throw away the SIGTRAP. */
3748 lp->status = 0;
3749
3750 if (debug_linux_nat)
3751 fprintf (stderr,
3e43a32a
MS
3752 "LLW: LWP %ld hit a breakpoint while"
3753 " waiting for another process;"
3754 " cancelled it\n",
e3e9f5a2
PA
3755 ptid_get_lwp (lp->ptid));
3756 }
3757 lp->stopped = 1;
d90e17a7
PA
3758 }
3759 else
3760 {
3761 lp->stopped = 1;
3762 lp->signalled = 0;
3763 }
3764 }
33355866 3765 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3766 {
3767 if (debug_linux_nat)
3e43a32a
MS
3768 fprintf (stderr,
3769 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3770 ptid_get_lwp (lp->ptid));
3771
3772 /* This was the last lwp in the process. Since
3773 events are serialized to GDB core, and we can't
3774 report this one right now, but GDB core and the
3775 other target layers will want to be notified
3776 about the exit code/signal, leave the status
3777 pending for the next time we're able to report
3778 it. */
d90e17a7
PA
3779
3780 /* Prevent trying to stop this thread again. We'll
3781 never try to resume it because it has a pending
3782 status. */
3783 lp->stopped = 1;
3784
3785		  /* Dead LWPs aren't expected to report a pending
3786		     SIGSTOP.  */
3787 lp->signalled = 0;
3788
3789 /* Store the pending event in the waitstatus as
3790 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3791 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3792 }
3793
3794 /* Keep looking. */
3795 lp = NULL;
d6b0e80f
AC
3796 }
3797
0e5bf2a8 3798 if (new_pending)
d90e17a7 3799 {
0e5bf2a8
PA
3800 /* Some LWP now has a pending event. Go all the way
3801 back to check it. */
3802 goto retry;
3803 }
12d9289a 3804
0e5bf2a8
PA
3805 if (lp)
3806 {
3807 /* We got an event to report to the core. */
3808 break;
d90e17a7 3809 }
0e5bf2a8
PA
3810
3811 /* Retry until nothing comes out of waitpid. A single
3812 SIGCHLD can indicate more than one child stopped. */
3813 continue;
d6b0e80f
AC
3814 }
3815
0e5bf2a8
PA
3816 /* Check for zombie thread group leaders. Those can't be reaped
3817 until all other threads in the thread group are. */
3818 check_zombie_leaders ();
d6b0e80f 3819
0e5bf2a8
PA
3820 /* If there are no resumed children left, bail. We'd be stuck
3821 forever in the sigsuspend call below otherwise. */
3822 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3823 {
3824 if (debug_linux_nat)
3825 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3826
0e5bf2a8 3827 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3828
0e5bf2a8
PA
3829 if (!target_can_async_p ())
3830 clear_sigint_trap ();
b84876c2 3831
0e5bf2a8
PA
3832 restore_child_signals_mask (&prev_mask);
3833 return minus_one_ptid;
d6b0e80f 3834 }
28736962 3835
0e5bf2a8
PA
3836 /* No interesting event to report to the core. */
3837
3838 if (target_options & TARGET_WNOHANG)
3839 {
01124a23 3840 if (debug_linux_nat)
28736962
PA
3841 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3842
0e5bf2a8 3843 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3844 restore_child_signals_mask (&prev_mask);
3845 return minus_one_ptid;
3846 }
d6b0e80f
AC
3847
3848 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3849 gdb_assert (lp == NULL);
0e5bf2a8
PA
3850
3851 /* Block until we get an event reported with SIGCHLD. */
3852 sigsuspend (&suspend_mask);
d6b0e80f
AC
3853 }
3854
b84876c2 3855 if (!target_can_async_p ())
d26b5354 3856 clear_sigint_trap ();
d6b0e80f
AC
3857
3858 gdb_assert (lp);
3859
ca2163eb
PA
3860 status = lp->status;
3861 lp->status = 0;
3862
d6b0e80f
AC
3863 /* Don't report signals that GDB isn't interested in, such as
3864 signals that are neither printed nor stopped upon. Stopping all
3865 threads can be a bit time-consuming so if we want decent
3866 performance with heavily multi-threaded programs, especially when
3867 they're using a high frequency timer, we'd better avoid it if we
3868 can. */
3869
3870 if (WIFSTOPPED (status))
3871 {
423ec54c 3872 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b0e80f 3873
2455069d
UW
3874 /* When using hardware single-step, we need to report every signal.
3875 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3876 if (!lp->step
2455069d 3877 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3878 {
3879	  /* FIXME: kettenis/2001-06-06: Should we resume all threads
3880 here? It is not clear we should. GDB may not expect
3881 other threads to run. On the other hand, not resuming
3882 newly attached threads may cause an unwanted delay in
3883 getting them running. */
3884 registers_changed ();
7b50312a
PA
3885 if (linux_nat_prepare_to_resume != NULL)
3886 linux_nat_prepare_to_resume (lp);
28439f5e 3887 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3888 lp->step, signo);
d6b0e80f
AC
3889 if (debug_linux_nat)
3890 fprintf_unfiltered (gdb_stdlog,
3891 "LLW: %s %s, %s (preempt 'handle')\n",
3892 lp->step ?
3893 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3894 target_pid_to_str (lp->ptid),
423ec54c
JK
3895 (signo != TARGET_SIGNAL_0
3896 ? strsignal (target_signal_to_host (signo))
3897 : "0"));
d6b0e80f 3898 lp->stopped = 0;
d6b0e80f
AC
3899 goto retry;
3900 }
3901
1ad15515 3902 if (!non_stop)
d6b0e80f 3903 {
1ad15515
PA
3904 /* Only do the below in all-stop, as we currently use SIGINT
3905 to implement target_stop (see linux_nat_stop) in
3906 non-stop. */
3907 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3908 {
3909 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3910 forwarded to the entire process group, that is, all LWPs
3911 will receive it - unless they're using CLONE_THREAD to
3912 share signals. Since we only want to report it once, we
3913 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3914 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3915 set_ignore_sigint, NULL);
1ad15515
PA
3916 lp->ignore_sigint = 0;
3917 }
3918 else
3919 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3920 }
3921 }
3922
3923 /* This LWP is stopped now. */
3924 lp->stopped = 1;
3925
3926 if (debug_linux_nat)
3927 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3928 status_to_str (status), target_pid_to_str (lp->ptid));
3929
4c28f408
PA
3930 if (!non_stop)
3931 {
3932      /* Now stop all other LWPs ... */
d90e17a7 3933 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3934
3935 /* ... and wait until all of them have reported back that
3936 they're no longer running. */
d90e17a7 3937 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3938
3939 /* If we're not waiting for a specific LWP, choose an event LWP
3940 from among those that have had events. Giving equal priority
3941 to all LWPs that have had events helps prevent
3942 starvation. */
0e5bf2a8 3943 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d90e17a7 3944 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3945
e3e9f5a2
PA
3946 /* Now that we've selected our final event LWP, cancel any
3947 breakpoints in other LWPs that have hit a GDB breakpoint.
3948 See the comment in cancel_breakpoints_callback to find out
3949 why. */
3950 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3951
4b60df3d
PA
3952 /* We'll need this to determine whether to report a SIGSTOP as
3953	 TARGET_SIGNAL_0.  Need to take a copy because
3954 resume_clear_callback clears it. */
3955 last_resume_kind = lp->last_resume_kind;
3956
e3e9f5a2
PA
3957 /* In all-stop, from the core's perspective, all LWPs are now
3958 stopped until a new resume action is sent over. */
3959 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3960 }
3961 else
25289eb2 3962 {
4b60df3d
PA
3963 /* See above. */
3964 last_resume_kind = lp->last_resume_kind;
3965 resume_clear_callback (lp, NULL);
25289eb2 3966 }
d6b0e80f 3967
26ab7092 3968 if (linux_nat_status_is_event (status))
d6b0e80f 3969 {
d6b0e80f
AC
3970 if (debug_linux_nat)
3971 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3972 "LLW: trap ptid is %s.\n",
3973 target_pid_to_str (lp->ptid));
d6b0e80f 3974 }
d6b0e80f
AC
3975
3976 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3977 {
3978 *ourstatus = lp->waitstatus;
3979 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3980 }
3981 else
3982 store_waitstatus (ourstatus, status);
3983
01124a23 3984 if (debug_linux_nat)
b84876c2
PA
3985 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3986
7feb7d06 3987 restore_child_signals_mask (&prev_mask);
1e225492 3988
4b60df3d 3989 if (last_resume_kind == resume_stop
25289eb2
PA
3990 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3991 && WSTOPSIG (status) == SIGSTOP)
3992 {
3993 /* A thread that has been requested to stop by GDB with
3994 target_stop, and it stopped cleanly, so report as SIG0. The
3995 use of SIGSTOP is an implementation detail. */
3996 ourstatus->value.sig = TARGET_SIGNAL_0;
3997 }
3998
1e225492
JK
3999 if (ourstatus->kind == TARGET_WAITKIND_EXITED
4000 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
4001 lp->core = -1;
4002 else
4003 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
4004
f973ed9c 4005 return lp->ptid;
d6b0e80f
AC
4006}
4007
e3e9f5a2
PA
4008/* Resume LWPs that are currently stopped without any pending status
4009 to report, but are resumed from the core's perspective. */
4010
4011static int
4012resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
4013{
4014 ptid_t *wait_ptid_p = data;
4015
4016 if (lp->stopped
4017 && lp->resumed
4018 && lp->status == 0
4019 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
4020 {
336060f3
PA
4021 struct regcache *regcache = get_thread_regcache (lp->ptid);
4022 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4023 CORE_ADDR pc = regcache_read_pc (regcache);
4024
e3e9f5a2
PA
4025 gdb_assert (is_executing (lp->ptid));
4026
4027 /* Don't bother if there's a breakpoint at PC that we'd hit
4028 immediately, and we're not waiting for this LWP. */
4029 if (!ptid_match (lp->ptid, *wait_ptid_p))
4030 {
e3e9f5a2
PA
4031 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
4032 return 0;
4033 }
4034
4035 if (debug_linux_nat)
4036 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
4037 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
4038 target_pid_to_str (lp->ptid),
4039 paddress (gdbarch, pc),
4040 lp->step);
e3e9f5a2 4041
336060f3 4042 registers_changed ();
7b50312a
PA
4043 if (linux_nat_prepare_to_resume != NULL)
4044 linux_nat_prepare_to_resume (lp);
e3e9f5a2
PA
4045 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
4046 lp->step, TARGET_SIGNAL_0);
4047 lp->stopped = 0;
4048 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
4049 lp->stopped_by_watchpoint = 0;
4050 }
4051
4052 return 0;
4053}
4054
7feb7d06
PA
4055static ptid_t
4056linux_nat_wait (struct target_ops *ops,
47608cb1
PA
4057 ptid_t ptid, struct target_waitstatus *ourstatus,
4058 int target_options)
7feb7d06
PA
4059{
4060 ptid_t event_ptid;
4061
4062 if (debug_linux_nat)
3e43a32a
MS
4063 fprintf_unfiltered (gdb_stdlog,
4064 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
4065
4066 /* Flush the async file first. */
4067 if (target_can_async_p ())
4068 async_file_flush ();
4069
e3e9f5a2
PA
4070 /* Resume LWPs that are currently stopped without any pending status
4071 to report, but are resumed from the core's perspective. LWPs get
4072 in this state if we find them stopping at a time we're not
4073 interested in reporting the event (target_wait on a
4074 specific_process, for example, see linux_nat_wait_1), and
4075 meanwhile the event became uninteresting. Don't bother resuming
4076 LWPs we're not going to wait for if they'd stop immediately. */
4077 if (non_stop)
4078 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4079
47608cb1 4080 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
4081
4082 /* If we requested any event, and something came out, assume there
4083 may be more. If we requested a specific lwp or process, also
4084 assume there may be more. */
4085 if (target_can_async_p ()
6953d224
PA
4086 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4087 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
4088 || !ptid_equal (ptid, minus_one_ptid)))
4089 async_file_mark ();
4090
4091 /* Get ready for the next event. */
4092 if (target_can_async_p ())
4093 target_async (inferior_event_handler, 0);
4094
4095 return event_ptid;
4096}
4097
d6b0e80f
AC
4098static int
4099kill_callback (struct lwp_info *lp, void *data)
4100{
ed731959
JK
4101 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4102
4103 errno = 0;
4104 kill (GET_LWP (lp->ptid), SIGKILL);
4105 if (debug_linux_nat)
4106 fprintf_unfiltered (gdb_stdlog,
4107 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4108 target_pid_to_str (lp->ptid),
4109 errno ? safe_strerror (errno) : "OK");
4110
4111 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4112
d6b0e80f
AC
4113 errno = 0;
4114 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4115 if (debug_linux_nat)
4116 fprintf_unfiltered (gdb_stdlog,
4117 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4118 target_pid_to_str (lp->ptid),
4119 errno ? safe_strerror (errno) : "OK");
4120
4121 return 0;
4122}
4123
4124static int
4125kill_wait_callback (struct lwp_info *lp, void *data)
4126{
4127 pid_t pid;
4128
4129 /* We must make sure that there are no pending events (delayed
4130 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4131 program doesn't interfere with any following debugging session. */
4132
4133 /* For cloned processes we must check both with __WCLONE and
4134 without, since the exit status of a cloned process isn't reported
4135 with __WCLONE. */
4136 if (lp->cloned)
4137 {
4138 do
4139 {
58aecb61 4140 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 4141 if (pid != (pid_t) -1)
d6b0e80f 4142 {
e85a822c
DJ
4143 if (debug_linux_nat)
4144 fprintf_unfiltered (gdb_stdlog,
4145 "KWC: wait %s received unknown.\n",
4146 target_pid_to_str (lp->ptid));
4147 /* The Linux kernel sometimes fails to kill a thread
4148 completely after PTRACE_KILL; that goes from the stop
4149 point in do_fork out to the one in
4150	     get_signal_to_deliver and waits again.  So kill it
4151 again. */
4152 kill_callback (lp, NULL);
d6b0e80f
AC
4153 }
4154 }
4155 while (pid == GET_LWP (lp->ptid));
4156
4157 gdb_assert (pid == -1 && errno == ECHILD);
4158 }
4159
4160 do
4161 {
58aecb61 4162 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 4163 if (pid != (pid_t) -1)
d6b0e80f 4164 {
e85a822c
DJ
4165 if (debug_linux_nat)
4166 fprintf_unfiltered (gdb_stdlog,
4167				  "KWC: wait %s received unknown.\n",
4168 target_pid_to_str (lp->ptid));
4169 /* See the call to kill_callback above. */
4170 kill_callback (lp, NULL);
d6b0e80f
AC
4171 }
4172 }
4173 while (pid == GET_LWP (lp->ptid));
4174
4175 gdb_assert (pid == -1 && errno == ECHILD);
4176 return 0;
4177}
4178
4179static void
7d85a9c0 4180linux_nat_kill (struct target_ops *ops)
d6b0e80f 4181{
f973ed9c
DJ
4182 struct target_waitstatus last;
4183 ptid_t last_ptid;
4184 int status;
d6b0e80f 4185
f973ed9c
DJ
4186 /* If we're stopped while forking and we haven't followed yet,
4187 kill the other task. We need to do this first because the
4188 parent will be sleeping if this is a vfork. */
d6b0e80f 4189
f973ed9c 4190 get_last_target_status (&last_ptid, &last);
d6b0e80f 4191
f973ed9c
DJ
4192 if (last.kind == TARGET_WAITKIND_FORKED
4193 || last.kind == TARGET_WAITKIND_VFORKED)
4194 {
3a3e9ee3 4195 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
4196 wait (&status);
4197 }
4198
4199 if (forks_exist_p ())
7feb7d06 4200 linux_fork_killall ();
f973ed9c
DJ
4201 else
4202 {
d90e17a7 4203 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 4204
4c28f408
PA
4205 /* Stop all threads before killing them, since ptrace requires
4206	 that the thread is stopped to successfully PTRACE_KILL.  */
d90e17a7 4207 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
4208 /* ... and wait until all of them have reported back that
4209 they're no longer running. */
d90e17a7 4210 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 4211
f973ed9c 4212      /* Kill all LWPs ... */
d90e17a7 4213 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
4214
4215 /* ... and wait until we've flushed all events. */
d90e17a7 4216 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
4217 }
4218
4219 target_mourn_inferior ();
d6b0e80f
AC
4220}
4221
4222static void
136d6dae 4223linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 4224{
d90e17a7 4225 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 4226
f973ed9c 4227 if (! forks_exist_p ())
d90e17a7
PA
4228 /* Normal case, no other forks available. */
4229 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
4230 else
4231 /* Multi-fork case. The current inferior_ptid has exited, but
4232 there are other viable forks to debug. Delete the exiting
4233 one and context-switch to the first available. */
4234 linux_fork_mourn_inferior ();
d6b0e80f
AC
4235}
4236
5b009018
PA
4237/* Convert a native/host siginfo object, into/from the siginfo in the
4238 layout of the inferiors' architecture. */
4239
4240static void
4241siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4242{
4243 int done = 0;
4244
4245 if (linux_nat_siginfo_fixup != NULL)
4246 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4247
4248 /* If there was no callback, or the callback didn't do anything,
4249 then just do a straight memcpy. */
4250 if (!done)
4251 {
4252 if (direction == 1)
4253 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4254 else
4255 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4256 }
4257}
4258
4aa995e1
PA
4259static LONGEST
4260linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4261 const char *annex, gdb_byte *readbuf,
4262 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4263{
4aa995e1
PA
4264 int pid;
4265 struct siginfo siginfo;
5b009018 4266 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
4267
4268 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4269 gdb_assert (readbuf || writebuf);
4270
4271 pid = GET_LWP (inferior_ptid);
4272 if (pid == 0)
4273 pid = GET_PID (inferior_ptid);
4274
4275 if (offset > sizeof (siginfo))
4276 return -1;
4277
4278 errno = 0;
4279 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4280 if (errno != 0)
4281 return -1;
4282
5b009018
PA
4283 /* When GDB is built as a 64-bit application, ptrace writes into
4284 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4285 inferior with a 64-bit GDB should look the same as debugging it
4286 with a 32-bit GDB, we need to convert it. GDB core always sees
4287 the converted layout, so any read/write will have to be done
4288 post-conversion. */
4289 siginfo_fixup (&siginfo, inf_siginfo, 0);
4290
4aa995e1
PA
4291 if (offset + len > sizeof (siginfo))
4292 len = sizeof (siginfo) - offset;
4293
4294 if (readbuf != NULL)
5b009018 4295 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4296 else
4297 {
5b009018
PA
4298 memcpy (inf_siginfo + offset, writebuf, len);
4299
4300 /* Convert back to ptrace layout before flushing it out. */
4301 siginfo_fixup (&siginfo, inf_siginfo, 1);
4302
4aa995e1
PA
4303 errno = 0;
4304 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4305 if (errno != 0)
4306 return -1;
4307 }
4308
4309 return len;
4310}
4311
10d6c8cd
DJ
4312static LONGEST
4313linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4314 const char *annex, gdb_byte *readbuf,
4315 const gdb_byte *writebuf,
4316 ULONGEST offset, LONGEST len)
d6b0e80f 4317{
4aa995e1 4318 struct cleanup *old_chain;
10d6c8cd 4319 LONGEST xfer;
d6b0e80f 4320
4aa995e1
PA
4321 if (object == TARGET_OBJECT_SIGNAL_INFO)
4322 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4323 offset, len);
4324
c35b1492
PA
4325 /* The target is connected but no live inferior is selected. Pass
4326 this request down to a lower stratum (e.g., the executable
4327 file). */
4328 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4329 return 0;
4330
4aa995e1
PA
4331 old_chain = save_inferior_ptid ();
4332
d6b0e80f
AC
4333 if (is_lwp (inferior_ptid))
4334 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4335
10d6c8cd
DJ
4336 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4337 offset, len);
d6b0e80f
AC
4338
4339 do_cleanups (old_chain);
4340 return xfer;
4341}
4342
4343static int
28439f5e 4344linux_thread_alive (ptid_t ptid)
d6b0e80f 4345{
8c6a60d1 4346 int err, tmp_errno;
4c28f408 4347
d6b0e80f
AC
4348 gdb_assert (is_lwp (ptid));
4349
4c28f408
PA
4350 /* Send signal 0 instead of anything ptrace, because ptracing a
4351 running thread errors out claiming that the thread doesn't
4352 exist. */
4353 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4354 tmp_errno = errno;
d6b0e80f
AC
4355 if (debug_linux_nat)
4356 fprintf_unfiltered (gdb_stdlog,
4c28f408 4357 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4358 target_pid_to_str (ptid),
8c6a60d1 4359 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4360
4c28f408 4361 if (err != 0)
d6b0e80f
AC
4362 return 0;
4363
4364 return 1;
4365}
4366
28439f5e
PA
4367static int
4368linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4369{
4370 return linux_thread_alive (ptid);
4371}
4372
d6b0e80f 4373static char *
117de6a9 4374linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4375{
4376 static char buf[64];
4377
a0ef4274 4378 if (is_lwp (ptid)
d90e17a7
PA
4379 && (GET_PID (ptid) != GET_LWP (ptid)
4380 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4381 {
4382 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4383 return buf;
4384 }
4385
4386 return normal_pid_to_str (ptid);
4387}
4388
4694da01
TT
4389static char *
4390linux_nat_thread_name (struct thread_info *thr)
4391{
4392 int pid = ptid_get_pid (thr->ptid);
4393 long lwp = ptid_get_lwp (thr->ptid);
4394#define FORMAT "/proc/%d/task/%ld/comm"
4395 char buf[sizeof (FORMAT) + 30];
4396 FILE *comm_file;
4397 char *result = NULL;
4398
4399 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4400 comm_file = fopen (buf, "r");
4401 if (comm_file)
4402 {
4403 /* Not exported by the kernel, so we define it here. */
4404#define COMM_LEN 16
4405 static char line[COMM_LEN + 1];
4406
4407 if (fgets (line, sizeof (line), comm_file))
4408 {
4409 char *nl = strchr (line, '\n');
4410
4411 if (nl)
4412 *nl = '\0';
4413 if (*line != '\0')
4414 result = line;
4415 }
4416
4417 fclose (comm_file);
4418 }
4419
4420#undef COMM_LEN
4421#undef FORMAT
4422
4423 return result;
4424}
4425
dba24537
AC
4426/* Accepts an integer PID; Returns a string representing a file that
4427 can be opened to get the symbols for the child process. */
4428
6d8fd2b7
UW
4429static char *
4430linux_child_pid_to_exec_file (int pid)
dba24537
AC
4431{
4432 char *name1, *name2;
4433
4434 name1 = xmalloc (MAXPATHLEN);
4435 name2 = xmalloc (MAXPATHLEN);
4436 make_cleanup (xfree, name1);
4437 make_cleanup (xfree, name2);
4438 memset (name2, 0, MAXPATHLEN);
4439
4440 sprintf (name1, "/proc/%d/exe", pid);
4441 if (readlink (name1, name2, MAXPATHLEN) > 0)
4442 return name2;
4443 else
4444 return name1;
4445}
4446
dba24537
AC
4447/* Records the thread's register state for the corefile note
4448 section. */
4449
4450static char *
6432734d
UW
4451linux_nat_collect_thread_registers (const struct regcache *regcache,
4452 ptid_t ptid, bfd *obfd,
4453 char *note_data, int *note_size,
4454 enum target_signal stop_signal)
dba24537 4455{
6432734d 4456 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 4457 const struct regset *regset;
55e969c1 4458 int core_regset_p;
6432734d
UW
4459 gdb_gregset_t gregs;
4460 gdb_fpregset_t fpregs;
4f844a66
DM
4461
4462 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
dba24537 4463
6432734d
UW
4464 if (core_regset_p
4465 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4466 sizeof (gregs)))
4467 != NULL && regset->collect_regset != NULL)
4468 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4f844a66 4469 else
6432734d 4470 fill_gregset (regcache, &gregs, -1);
2f2241f1 4471
6432734d
UW
4472 note_data = (char *) elfcore_write_prstatus
4473 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4474 target_signal_to_host (stop_signal), &gregs);
2f2241f1 4475
6432734d
UW
4476 if (core_regset_p
4477 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4478 sizeof (fpregs)))
3e43a32a 4479 != NULL && regset->collect_regset != NULL)
6432734d
UW
4480 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4481 else
4482 fill_fpregset (regcache, &fpregs, -1);
17ea7499 4483
6432734d
UW
4484 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4485 &fpregs, sizeof (fpregs));
4f844a66 4486
dba24537
AC
4487 return note_data;
4488}
4489
dba24537
AC
4490/* Fills the "to_make_corefile_note" target vector. Builds the note
4491 section for a corefile, and returns it in a malloc buffer. */
4492
4493static char *
4494linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4495{
6432734d
UW
4496 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4497 converted to gdbarch_core_regset_sections, this function can go away. */
4498 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4499 linux_nat_collect_thread_registers);
dba24537
AC
4500}
4501
10d6c8cd
DJ
4502/* Implement the to_xfer_partial interface for memory reads using the /proc
4503 filesystem. Because we can use a single read() call for /proc, this
4504 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4505 but it doesn't support writes. */
4506
4507static LONGEST
4508linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4509 const char *annex, gdb_byte *readbuf,
4510 const gdb_byte *writebuf,
4511 ULONGEST offset, LONGEST len)
dba24537 4512{
10d6c8cd
DJ
4513 LONGEST ret;
4514 int fd;
dba24537
AC
4515 char filename[64];
4516
10d6c8cd 4517 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4518 return 0;
4519
4520 /* Don't bother for one word. */
4521 if (len < 3 * sizeof (long))
4522 return 0;
4523
4524 /* We could keep this file open and cache it - possibly one per
4525 thread. That requires some juggling, but is even faster. */
4526 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4527 fd = open (filename, O_RDONLY | O_LARGEFILE);
4528 if (fd == -1)
4529 return 0;
4530
4531 /* If pread64 is available, use it. It's faster if the kernel
4532 supports it (only one syscall), and it's 64-bit safe even on
4533 32-bit platforms (for instance, SPARC debugging a SPARC64
4534 application). */
4535#ifdef HAVE_PREAD64
10d6c8cd 4536 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4537#else
10d6c8cd 4538 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4539#endif
4540 ret = 0;
4541 else
4542 ret = len;
4543
4544 close (fd);
4545 return ret;
4546}
4547
efcbbd14
UW
4548
4549/* Enumerate spufs IDs for process PID. */
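/* Each SPU context of the process shows up as an open file
   descriptor whose directory lives on an spufs mount; the matching
   descriptor numbers are written into BUF as 4-byte integers in
   target byte order, honoring the OFFSET/LEN window requested by the
   caller.  */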
4550static LONGEST
4551spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4552{
4553 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4554 LONGEST pos = 0;
4555 LONGEST written = 0;
4556 char path[128];
4557 DIR *dir;
4558 struct dirent *entry;
4559
4560 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4561 dir = opendir (path);
4562 if (!dir)
4563 return -1;
4564
4565 rewinddir (dir);
4566 while ((entry = readdir (dir)) != NULL)
4567 {
4568 struct stat st;
4569 struct statfs stfs;
4570 int fd;
4571
4572 fd = atoi (entry->d_name);
4573 if (!fd)
4574 continue;
4575
4576 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4577 if (stat (path, &st) != 0)
4578 continue;
4579 if (!S_ISDIR (st.st_mode))
4580 continue;
4581
4582 if (statfs (path, &stfs) != 0)
4583 continue;
4584 if (stfs.f_type != SPUFS_MAGIC)
4585 continue;
4586
4587 if (pos >= offset && pos + 4 <= offset + len)
4588 {
4589 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4590 written += 4;
4591 }
4592 pos += 4;
4593 }
4594
4595 closedir (dir);
4596 return written;
4597}
4598
4599/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4600 object type, using the /proc file system. */
4601static LONGEST
4602linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4603 const char *annex, gdb_byte *readbuf,
4604 const gdb_byte *writebuf,
4605 ULONGEST offset, LONGEST len)
4606{
4607 char buf[128];
4608 int fd = 0;
4609 int ret = -1;
4610 int pid = PIDGET (inferior_ptid);
4611
4612 if (!annex)
4613 {
4614 if (!readbuf)
4615 return -1;
4616 else
4617 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4618 }
4619
4620 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4621 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4622 if (fd <= 0)
4623 return -1;
4624
4625 if (offset != 0
4626 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4627 {
4628 close (fd);
4629 return 0;
4630 }
4631
4632 if (writebuf)
4633 ret = write (fd, writebuf, (size_t) len);
4634 else if (readbuf)
4635 ret = read (fd, readbuf, (size_t) len);
4636
4637 close (fd);
4638 return ret;
4639}
4640
4641
dba24537
AC
4642/* Parse LINE as a signal set and add its set bits to SIGS. */
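/* For example (illustrative values), a status line of
   "SigPnd:\t0000000000000200\n" - passed here with the "SigPnd:\t"
   prefix already stripped by the caller - has bit 9 of the hex mask
   set, which this routine records as signal 10 (SIGUSR1 on many
   Linux targets) in SIGS.  */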
4643
4644static void
4645add_line_to_sigset (const char *line, sigset_t *sigs)
4646{
4647 int len = strlen (line) - 1;
4648 const char *p;
4649 int signum;
4650
4651 if (line[len] != '\n')
8a3fe4f8 4652 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4653
4654 p = line;
4655 signum = len * 4;
4656 while (len-- > 0)
4657 {
4658 int digit;
4659
4660 if (*p >= '0' && *p <= '9')
4661 digit = *p - '0';
4662 else if (*p >= 'a' && *p <= 'f')
4663 digit = *p - 'a' + 10;
4664 else
8a3fe4f8 4665 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4666
4667 signum -= 4;
4668
4669 if (digit & 1)
4670 sigaddset (sigs, signum + 1);
4671 if (digit & 2)
4672 sigaddset (sigs, signum + 2);
4673 if (digit & 4)
4674 sigaddset (sigs, signum + 3);
4675 if (digit & 8)
4676 sigaddset (sigs, signum + 4);
4677
4678 p++;
4679 }
4680}
4681
4682/* Find process PID's pending signals from /proc/pid/status and set
4683 SIGS to match. */
4684
4685void
3e43a32a
MS
4686linux_proc_pending_signals (int pid, sigset_t *pending,
4687 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4688{
4689 FILE *procfile;
4690 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 4691 struct cleanup *cleanup;
dba24537
AC
4692
4693 sigemptyset (pending);
4694 sigemptyset (blocked);
4695 sigemptyset (ignored);
4696 sprintf (fname, "/proc/%d/status", pid);
4697 procfile = fopen (fname, "r");
4698 if (procfile == NULL)
8a3fe4f8 4699 error (_("Could not open %s"), fname);
7c8a8b04 4700 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4701
4702 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4703 {
4704 /* Normal queued signals are on the SigPnd line in the status
4705 file. However, 2.6 kernels also have a "shared" pending
4706 queue for delivering signals to a thread group, so check for
4707 a ShdPnd line also.
4708
4709 Unfortunately some Red Hat kernels include the shared pending
4710 queue but not the ShdPnd status field. */
4711
4712 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4713 add_line_to_sigset (buffer + 8, pending);
4714 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4715 add_line_to_sigset (buffer + 8, pending);
4716 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4717 add_line_to_sigset (buffer + 8, blocked);
4718 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4719 add_line_to_sigset (buffer + 8, ignored);
4720 }
4721
7c8a8b04 4722 do_cleanups (cleanup);
dba24537
AC
4723}
4724
07e059b5
VP
4725static LONGEST
4726linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
4727 const char *annex, gdb_byte *readbuf,
4728 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5 4729{
07e059b5
VP
4730 gdb_assert (object == TARGET_OBJECT_OSDATA);
4731
d26e3629 4732 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4733}
4734
10d6c8cd
DJ
4735static LONGEST
4736linux_xfer_partial (struct target_ops *ops, enum target_object object,
4737 const char *annex, gdb_byte *readbuf,
4738 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4739{
4740 LONGEST xfer;
4741
4742 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4743 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
4744 offset, len);
4745
07e059b5
VP
4746 if (object == TARGET_OBJECT_OSDATA)
4747 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4748 offset, len);
4749
efcbbd14
UW
4750 if (object == TARGET_OBJECT_SPU)
4751 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4752 offset, len);
4753
8f313923
JK
4754  /* GDB may compute addresses in a type wider than the inferior's address
4755     width.  The address needs to be masked to that width before its final
4756     use - either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4757
4758 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4759
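  /* For example, with a 32-bit inferior (addr_bit == 32) an offset of
     0x100000004 computed in 64-bit arithmetic is masked down to 0x4
     before being handed to the layers below.  */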
4760 if (object == TARGET_OBJECT_MEMORY)
4761 {
4762 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4763
4764 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4765 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4766 }
4767
10d6c8cd
DJ
4768 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4769 offset, len);
4770 if (xfer != 0)
4771 return xfer;
4772
4773 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4774 offset, len);
4775}
4776
5808517f
YQ
4777static void
4778cleanup_target_stop (void *arg)
4779{
4780 ptid_t *ptid = (ptid_t *) arg;
4781
4782 gdb_assert (arg != NULL);
4783
4784 /* Unpause all */
4785 target_resume (*ptid, 0, TARGET_SIGNAL_0);
4786}
4787
4788static VEC(static_tracepoint_marker_p) *
4789linux_child_static_tracepoint_markers_by_strid (const char *strid)
4790{
4791 char s[IPA_CMD_BUF_SIZE];
4792 struct cleanup *old_chain;
4793 int pid = ptid_get_pid (inferior_ptid);
4794 VEC(static_tracepoint_marker_p) *markers = NULL;
4795 struct static_tracepoint_marker *marker = NULL;
4796 char *p = s;
4797 ptid_t ptid = ptid_build (pid, 0, 0);
4798
4799 /* Pause all */
4800 target_stop (ptid);
4801
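  /* Query the in-process agent for the marker list.  The exchange
     mirrors the remote protocol's qTfSTM/qTsSTM packets: each reply
     is either "m<marker>[,<marker>...]" or a non-'m' reply (the
     protocol uses "l") once the list is exhausted, which is why the
     loops below key off 'm' and ','.  */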
4802 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4803 s[sizeof ("qTfSTM")] = 0;
4804
4805 agent_run_command (pid, s);
4806
4807 old_chain = make_cleanup (free_current_marker, &marker);
4808 make_cleanup (cleanup_target_stop, &ptid);
4809
4810 while (*p++ == 'm')
4811 {
4812 if (marker == NULL)
4813 marker = XCNEW (struct static_tracepoint_marker);
4814
4815 do
4816 {
4817 parse_static_tracepoint_marker_definition (p, &p, marker);
4818
4819 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4820 {
4821 VEC_safe_push (static_tracepoint_marker_p,
4822 markers, marker);
4823 marker = NULL;
4824 }
4825 else
4826 {
4827 release_static_tracepoint_marker (marker);
4828 memset (marker, 0, sizeof (*marker));
4829 }
4830 }
4831 while (*p++ == ','); /* comma-separated list */
4832
4833 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4834 s[sizeof ("qTsSTM")] = 0;
4835 agent_run_command (pid, s);
4836 p = s;
4837 }
4838
4839 do_cleanups (old_chain);
4840
4841 return markers;
4842}
4843
e9efe249 4844/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4845 it with local methods. */
4846
910122bf
UW
4847static void
4848linux_target_install_ops (struct target_ops *t)
10d6c8cd 4849{
6d8fd2b7 4850 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4851 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4852 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4853 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4854 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4855 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4856 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4857 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4858 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4859 t->to_post_attach = linux_child_post_attach;
4860 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4861 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4862
4863 super_xfer_partial = t->to_xfer_partial;
4864 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4865
4866 t->to_static_tracepoint_markers_by_strid
4867 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4868}
4869
4870struct target_ops *
4871linux_target (void)
4872{
4873 struct target_ops *t;
4874
4875 t = inf_ptrace_target ();
4876 linux_target_install_ops (t);
4877
4878 return t;
4879}
4880
4881struct target_ops *
7714d83a 4882linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4883{
4884 struct target_ops *t;
4885
4886 t = inf_ptrace_trad_target (register_u_offset);
4887 linux_target_install_ops (t);
10d6c8cd 4888
10d6c8cd
DJ
4889 return t;
4890}
4891
b84876c2
PA
4892/* target_is_async_p implementation. */
4893
4894static int
4895linux_nat_is_async_p (void)
4896{
4897 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4898 it explicitly with the "set target-async" command.
b84876c2 4899 Someday, linux will always be async. */
3dd5b83d 4900 return target_async_permitted;
b84876c2
PA
4901}
4902
4903/* target_can_async_p implementation. */
4904
4905static int
4906linux_nat_can_async_p (void)
4907{
4908 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4909 it explicitly with the "set target-async" command.
b84876c2 4910 Someday, linux will always be async. */
3dd5b83d 4911 return target_async_permitted;
b84876c2
PA
4912}
4913
9908b566
VP
4914static int
4915linux_nat_supports_non_stop (void)
4916{
4917 return 1;
4918}
4919
d90e17a7
PA
4920/* True if we want to support multi-process. To be removed when GDB
4921 supports multi-exec. */
4922
2277426b 4923int linux_multi_process = 1;
d90e17a7
PA
4924
4925static int
4926linux_nat_supports_multi_process (void)
4927{
4928 return linux_multi_process;
4929}
4930
03583c20
UW
4931static int
4932linux_nat_supports_disable_randomization (void)
4933{
4934#ifdef HAVE_PERSONALITY
4935 return 1;
4936#else
4937 return 0;
4938#endif
4939}
4940
b84876c2
PA
4941static int async_terminal_is_ours = 1;
4942
4943/* target_terminal_inferior implementation. */
4944
4945static void
4946linux_nat_terminal_inferior (void)
4947{
4948 if (!target_is_async_p ())
4949 {
4950 /* Async mode is disabled. */
4951 terminal_inferior ();
4952 return;
4953 }
4954
b84876c2
PA
4955 terminal_inferior ();
4956
d9d2d8b6 4957 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4958 if (!async_terminal_is_ours)
4959 return;
4960
4961 delete_file_handler (input_fd);
4962 async_terminal_is_ours = 0;
4963 set_sigint_trap ();
4964}
4965
4966/* target_terminal_ours implementation. */
4967
2c0b251b 4968static void
b84876c2
PA
4969linux_nat_terminal_ours (void)
4970{
4971 if (!target_is_async_p ())
4972 {
4973 /* Async mode is disabled. */
4974 terminal_ours ();
4975 return;
4976 }
4977
4978 /* GDB should never give the terminal to the inferior if the
4979 inferior is running in the background (run&, continue&, etc.),
4980 but claiming it sure should. */
4981 terminal_ours ();
4982
b84876c2
PA
4983 if (async_terminal_is_ours)
4984 return;
4985
4986 clear_sigint_trap ();
4987 add_file_handler (input_fd, stdin_event_handler, 0);
4988 async_terminal_is_ours = 1;
4989}
4990
4991static void (*async_client_callback) (enum inferior_event_type event_type,
4992 void *context);
4993static void *async_client_context;
4994
7feb7d06
PA
4995/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4996 so we notice when any child changes state, and notify the
4997 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4998 above to wait for the arrival of a SIGCHLD. */
4999
b84876c2 5000static void
7feb7d06 5001sigchld_handler (int signo)
b84876c2 5002{
7feb7d06
PA
5003 int old_errno = errno;
5004
01124a23
DE
5005 if (debug_linux_nat)
5006 ui_file_write_async_safe (gdb_stdlog,
5007 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
5008
5009 if (signo == SIGCHLD
5010 && linux_nat_event_pipe[0] != -1)
5011 async_file_mark (); /* Let the event loop know that there are
5012 events to handle. */
5013
5014 errno = old_errno;
5015}
5016
5017/* Callback registered with the target events file descriptor. */
5018
5019static void
5020handle_target_event (int error, gdb_client_data client_data)
5021{
5022 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5023}
5024
5025/* Create/destroy the target events pipe. Returns previous state. */
5026
5027static int
5028linux_async_pipe (int enable)
5029{
5030 int previous = (linux_nat_event_pipe[0] != -1);
5031
5032 if (previous != enable)
5033 {
5034 sigset_t prev_mask;
5035
5036 block_child_signals (&prev_mask);
5037
5038 if (enable)
5039 {
5040 if (pipe (linux_nat_event_pipe) == -1)
5041 internal_error (__FILE__, __LINE__,
5042 "creating event pipe failed.");
5043
5044 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5045 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5046 }
5047 else
5048 {
5049 close (linux_nat_event_pipe[0]);
5050 close (linux_nat_event_pipe[1]);
5051 linux_nat_event_pipe[0] = -1;
5052 linux_nat_event_pipe[1] = -1;
5053 }
5054
5055 restore_child_signals_mask (&prev_mask);
5056 }
5057
5058 return previous;
b84876c2
PA
5059}
5060
/* target_async implementation.  */

static void
linux_nat_async (void (*callback) (enum inferior_event_type event_type,
                                   void *context), void *context)
{
  if (callback != NULL)
    {
      async_client_callback = callback;
      async_client_context = context;
      if (!linux_async_pipe (1))
        {
          add_file_handler (linux_nat_event_pipe[0],
                            handle_target_event, NULL);
          /* There may be pending events to handle.  Tell the event loop
             to poll them.  */
          async_file_mark ();
        }
    }
  else
    {
      async_client_callback = callback;
      async_client_context = context;
      delete_file_handler (linux_nat_event_pipe[0]);
      linux_async_pipe (0);
    }
  return;
}

/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
   event came out.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      ptid_t ptid = lwp->ptid;

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNSL: running -> suspending %s\n",
                            target_pid_to_str (lwp->ptid));

      if (lwp->last_resume_kind == resume_stop)
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "linux-nat: already stopping LWP %ld at "
                                "GDB's request\n",
                                ptid_get_lwp (lwp->ptid));
          return 0;
        }

      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
        {
          if (find_thread_ptid (lwp->ptid)->stop_requested)
            fprintf_unfiltered (gdb_stdlog,
                                "LNSL: already stopped/stop_requested %s\n",
                                target_pid_to_str (lwp->ptid));
          else
            fprintf_unfiltered (gdb_stdlog,
                                "LNSL: already stopped/no "
                                "stop_requested yet %s\n",
                                target_pid_to_str (lwp->ptid));
        }
    }
  return 0;
}

static void
linux_nat_stop (ptid_t ptid)
{
  if (non_stop)
    iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
  else
    linux_ops->to_stop (ptid);
}

static void
linux_nat_close (int quitting)
{
  /* Unregister from the event loop.  */
  if (linux_nat_is_async_p ())
    linux_nat_async (NULL, 0);

  if (linux_ops->to_close)
    linux_ops->to_close (quitting);
}

/* When requests are passed down from the linux-nat layer to the
   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
   used.  The address space pointer is stored in the inferior object,
   but the common code that is passed such ptid can't tell whether
   lwpid is a "main" process id or not (it assumes so).  We reverse
   look up the "main" process id from the lwp here.  */

static struct address_space *
linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
{
  struct lwp_info *lwp;
  struct inferior *inf;
  int pid;

  pid = GET_LWP (ptid);
  if (GET_LWP (ptid) == 0)
    {
      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
         tgid.  */
      lwp = find_lwp_pid (ptid);
      pid = GET_PID (lwp->ptid);
    }
  else
    {
      /* A (pid,lwpid,0) ptid.  */
      pid = GET_PID (ptid);
    }

  inf = find_inferior_pid (pid);
  gdb_assert (inf != NULL);
  return inf->aspace;
}

int
linux_nat_core_of_thread_1 (ptid_t ptid)
{
  struct cleanup *back_to;
  char *filename;
  FILE *f;
  char *content = NULL;
  char *p;
  char *ts = 0;
  int content_read = 0;
  int i;
  int core;

  filename = xstrprintf ("/proc/%d/task/%ld/stat",
                         GET_PID (ptid), GET_LWP (ptid));
  back_to = make_cleanup (xfree, filename);

  f = fopen (filename, "r");
  if (!f)
    {
      do_cleanups (back_to);
      return -1;
    }

  make_cleanup_fclose (f);

  for (;;)
    {
      int n;

      content = xrealloc (content, content_read + 1024);
      n = fread (content + content_read, 1, 1024, f);
      content_read += n;
      if (n < 1024)
        {
          content[content_read] = '\0';
          break;
        }
    }

  make_cleanup (xfree, content);

  p = strchr (content, '(');

  /* Skip ")".  */
  if (p != NULL)
    p = strchr (p, ')');
  if (p != NULL)
    p++;

  /* If the first field after program name has index 0, then core number is
     the field with index 36.  There's no constant for that anywhere.  */
  if (p != NULL)
    p = strtok_r (p, " ", &ts);
  for (i = 0; p != NULL && i != 36; ++i)
    p = strtok_r (NULL, " ", &ts);

  if (p == NULL || sscanf (p, "%d", &core) == 0)
    core = -1;

  do_cleanups (back_to);

  return core;
}

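/* Illustrative sketch (not part of this file): the same stat-file
   parse written with only the C library, for readers who want to see
   the /proc format in isolation.  demo_core_of_tid is a hypothetical
   name; it returns the last-run CPU of task TID in process PID, or -1
   on error.  It uses strrchr to skip the command name, which can
   itself contain spaces or ')'.  Assumes <stdio.h>, <stdlib.h> and
   <string.h>; guarded out so it does not affect this file.  */
#if 0
static int
demo_core_of_tid (int pid, int tid)
{
  char path[64];
  char line[4096];
  FILE *f;
  char *p, *save = NULL;
  int i, core = -1;

  snprintf (path, sizeof path, "/proc/%d/task/%d/stat", pid, tid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  if (fgets (line, sizeof line, f) != NULL)
    {
      /* Skip past the parenthesized command name.  */
      p = strrchr (line, ')');
      if (p != NULL)
        p = strtok_r (p + 1, " ", &save);
      /* Counting the state field just after the command as index 0,
         the processor field has index 36 (field 39 of the line).  */
      for (i = 0; p != NULL && i != 36; ++i)
        p = strtok_r (NULL, " ", &save);
      if (p != NULL)
        core = atoi (p);
    }
  fclose (f);
  return core;
}
#endif
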
/* Return the cached value of the processor core for thread PTID.  */

static int
linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
{
  struct lwp_info *info = find_lwp_pid (ptid);

  if (info)
    return info->core;
  return -1;
}

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}

/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t,
                          void (*new_thread) (struct lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}

/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
                             int (*siginfo_fixup) (struct siginfo *,
                                                   gdb_byte *,
                                                   int))
{
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}

/* Register a method to call prior to resuming a thread.  */

void
linux_nat_set_prepare_to_resume (struct target_ops *t,
                                 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}

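/* Illustrative sketch (not part of this file): an architecture's
   Linux native target typically builds the ptrace-based target,
   registers it with linux_nat_add_target, and then installs the
   per-LWP hooks declared above.  The hypothetical_* names below are
   invented for this example; linux_target is assumed to be the
   GNU/Linux target constructor declared in linux-nat.h.  Guarded out
   so it does not affect this file.  */
#if 0
static void
hypothetical_new_thread (struct lwp_info *lp)
{
  /* E.g. copy debug-register state to the newly attached LWP.  */
}

static void
hypothetical_prepare_to_resume (struct lwp_info *lp)
{
  /* E.g. flush stale debug registers to LP before it runs again.  */
}

void
_initialize_hypothetical_linux_nat (void)
{
  struct target_ops *t;

  /* Fill in the generic GNU/Linux methods.  */
  t = linux_target ();

  /* Register the target, then the LWP lifetime hooks.  */
  linux_nat_add_target (t);
  linux_nat_set_new_thread (t, hypothetical_new_thread);
  linux_nat_set_prepare_to_resume (t, hypothetical_prepare_to_resume);
}
#endif
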
/* Return the saved siginfo associated with PTID.  */
struct siginfo *
linux_nat_get_siginfo (ptid_t ptid)
{
  struct lwp_info *lp = find_lwp_pid (ptid);

  gdb_assert (lp != NULL);

  return &lp->siginfo;
}

/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

void
_initialize_linux_nat (void)
{
  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
                            &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
                            NULL,
                            show_debug_linux_nat,
                            &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}
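
/* Example usage (illustrative): the maintenance knob registered above
   can be toggled from the GDB prompt to trace this module; the exact
   wording of the "show" output below is not guaranteed.

       (gdb) set debug lin-lwp 1
       (gdb) show debug lin-lwp
       Debugging of GNU/Linux lwp module is 1.  */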

/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
                          sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}