include alloca.h if available.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
545587ee 2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4c38e0a4 3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
da6d8c04
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
19
20#include "server.h"
58caa3dc 21#include "linux-low.h"
da6d8c04 22
58caa3dc 23#include <sys/wait.h>
da6d8c04
DJ
24#include <stdio.h>
25#include <sys/param.h>
da6d8c04 26#include <sys/ptrace.h>
da6d8c04
DJ
27#include <signal.h>
28#include <sys/ioctl.h>
29#include <fcntl.h>
d07c63e7 30#include <string.h>
0a30fbc4
DJ
31#include <stdlib.h>
32#include <unistd.h>
fa6a77dc 33#include <errno.h>
fd500816 34#include <sys/syscall.h>
f9387fc3 35#include <sched.h>
07e059b5
VP
36#include <ctype.h>
37#include <pwd.h>
38#include <sys/types.h>
39#include <dirent.h>
efcbbd14
UW
40#include <sys/stat.h>
41#include <sys/vfs.h>
1570b33e 42#include <sys/uio.h>
957f3f49
DE
43#ifndef ELFMAG0
44/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
48#include <elf.h>
49#endif
efcbbd14
UW
50
51#ifndef SPUFS_MAGIC
52#define SPUFS_MAGIC 0x23c9b64e
53#endif
da6d8c04 54
32ca6d61
DJ
55#ifndef PTRACE_GETSIGINFO
56# define PTRACE_GETSIGINFO 0x4202
57# define PTRACE_SETSIGINFO 0x4203
58#endif
59
fd462a61
DJ
60#ifndef O_LARGEFILE
61#define O_LARGEFILE 0
62#endif
63
24a09b5f
DJ
64/* If the system headers did not provide the constants, hard-code the normal
65 values. */
66#ifndef PTRACE_EVENT_FORK
67
68#define PTRACE_SETOPTIONS 0x4200
69#define PTRACE_GETEVENTMSG 0x4201
70
71/* options set using PTRACE_SETOPTIONS */
72#define PTRACE_O_TRACESYSGOOD 0x00000001
73#define PTRACE_O_TRACEFORK 0x00000002
74#define PTRACE_O_TRACEVFORK 0x00000004
75#define PTRACE_O_TRACECLONE 0x00000008
76#define PTRACE_O_TRACEEXEC 0x00000010
77#define PTRACE_O_TRACEVFORKDONE 0x00000020
78#define PTRACE_O_TRACEEXIT 0x00000040
79
80/* Wait extended result codes for the above trace options. */
81#define PTRACE_EVENT_FORK 1
82#define PTRACE_EVENT_VFORK 2
83#define PTRACE_EVENT_CLONE 3
84#define PTRACE_EVENT_EXEC 4
85#define PTRACE_EVENT_VFORK_DONE 5
86#define PTRACE_EVENT_EXIT 6
87
88#endif /* PTRACE_EVENT_FORK */
89
90/* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use
92 in some contexts. */
93#ifndef __WALL
94#define __WALL 0x40000000 /* Wait for any child. */
95#endif
96
ec8ebe72
DE
97#ifndef W_STOPCODE
98#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
99#endif
100
1a981360
PA
101/* This is the kernel's hard limit. Not to be confused with
102 SIGRTMIN. */
103#ifndef __SIGRTMIN
104#define __SIGRTMIN 32
105#endif
106
42c81e2a
DJ
107#ifdef __UCLIBC__
108#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
109#define HAS_NOMMU
110#endif
111#endif
112
24a09b5f
DJ
113/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
114 representation of the thread ID.
611cb4a5 115
54a0b537 116 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
95954743
PA
117 the same as the LWP ID.
118
119 ``all_processes'' is keyed by the "overall process ID", which
120 GNU/Linux calls tgid, "thread group ID". */
0d62e5e8 121
54a0b537 122struct inferior_list all_lwps;
0d62e5e8 123
24a09b5f
DJ
124/* A list of all unknown processes which receive stop signals. Some other
125 process will presumably claim each of these as forked children
126 momentarily. */
127
128struct inferior_list stopped_pids;
129
0d62e5e8
DJ
130/* FIXME this is a bit of a hack, and could be removed. */
131int stopping_threads;
132
133/* FIXME make into a target method? */
24a09b5f 134int using_threads = 1;
24a09b5f 135
fa593d66
PA
136/* True if we're presently stabilizing threads (moving them out of
137 jump pads). */
138static int stabilizing_threads;
139
95954743
PA
140/* This flag is true iff we've just created or attached to our first
141 inferior but it has not stopped yet. As soon as it does, we need
142 to call the low target's arch_setup callback. Doing this only on
143 the first inferior avoids reinializing the architecture on every
144 inferior, and avoids messing with the register caches of the
145 already running inferiors. NOTE: this assumes all inferiors under
146 control of gdbserver have the same architecture. */
d61ddec4
UW
147static int new_inferior;
148
2acc282a 149static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 150 int step, int signal, siginfo_t *info);
2bd7c093 151static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
152static void stop_all_lwps (int suspend, struct lwp_info *except);
153static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
95954743 154static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
95954743 155static void *add_lwp (ptid_t ptid);
c35fafde 156static int linux_stopped_by_watchpoint (void);
95954743 157static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
dc146f7c 158static int linux_core_of_thread (ptid_t ptid);
d50171e4 159static void proceed_all_lwps (void);
d50171e4
PA
160static int finish_step_over (struct lwp_info *lwp);
161static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
162static int kill_lwp (unsigned long lwpid, int signo);
1e7fc18c 163static void linux_enable_event_reporting (int pid);
d50171e4
PA
164
165/* True if the low target can hardware single-step. Such targets
166 don't need a BREAKPOINT_REINSERT_ADDR callback. */
167
168static int
169can_hardware_single_step (void)
170{
171 return (the_low_target.breakpoint_reinsert_addr == NULL);
172}
173
174/* True if the low target supports memory breakpoints. If so, we'll
175 have a GET_PC implementation. */
176
177static int
178supports_breakpoints (void)
179{
180 return (the_low_target.get_pc != NULL);
181}
0d62e5e8 182
fa593d66
PA
183/* Returns true if this target can support fast tracepoints. This
184 does not mean that the in-process agent has been loaded in the
185 inferior. */
186
187static int
188supports_fast_tracepoints (void)
189{
190 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
191}
192
0d62e5e8
DJ
193struct pending_signals
194{
195 int signal;
32ca6d61 196 siginfo_t info;
0d62e5e8
DJ
197 struct pending_signals *prev;
198};
611cb4a5 199
14ce3065
DE
200#define PTRACE_ARG3_TYPE void *
201#define PTRACE_ARG4_TYPE void *
c6ecbae5 202#define PTRACE_XFER_TYPE long
da6d8c04 203
58caa3dc 204#ifdef HAVE_LINUX_REGSETS
52fa2412
UW
205static char *disabled_regsets;
206static int num_regsets;
58caa3dc
DJ
207#endif
208
bd99dc85
PA
209/* The read/write ends of the pipe registered as waitable file in the
210 event loop. */
211static int linux_event_pipe[2] = { -1, -1 };
212
213/* True if we're currently in async mode. */
214#define target_is_async_p() (linux_event_pipe[0] != -1)
215
02fc4de7 216static void send_sigstop (struct lwp_info *lwp);
bd99dc85
PA
217static void wait_for_sigstop (struct inferior_list_entry *entry);
218
d0722149
DE
219/* Accepts an integer PID; Returns a string representing a file that
220 can be opened to get info for the child process.
221 Space for the result is malloc'd, caller must free. */
222
223char *
224linux_child_pid_to_exec_file (int pid)
225{
226 char *name1, *name2;
227
228 name1 = xmalloc (MAXPATHLEN);
229 name2 = xmalloc (MAXPATHLEN);
230 memset (name2, 0, MAXPATHLEN);
231
232 sprintf (name1, "/proc/%d/exe", pid);
233 if (readlink (name1, name2, MAXPATHLEN) > 0)
234 {
235 free (name1);
236 return name2;
237 }
238 else
239 {
240 free (name2);
241 return name1;
242 }
243}
244
245/* Return non-zero if HEADER is a 64-bit ELF file. */
246
247static int
957f3f49 248elf_64_header_p (const Elf64_Ehdr *header)
d0722149
DE
249{
250 return (header->e_ident[EI_MAG0] == ELFMAG0
251 && header->e_ident[EI_MAG1] == ELFMAG1
252 && header->e_ident[EI_MAG2] == ELFMAG2
253 && header->e_ident[EI_MAG3] == ELFMAG3
254 && header->e_ident[EI_CLASS] == ELFCLASS64);
255}
256
257/* Return non-zero if FILE is a 64-bit ELF file,
258 zero if the file is not a 64-bit ELF file,
259 and -1 if the file is not accessible or doesn't exist. */
260
261int
262elf_64_file_p (const char *file)
263{
957f3f49 264 Elf64_Ehdr header;
d0722149
DE
265 int fd;
266
267 fd = open (file, O_RDONLY);
268 if (fd < 0)
269 return -1;
270
271 if (read (fd, &header, sizeof (header)) != sizeof (header))
272 {
273 close (fd);
274 return 0;
275 }
276 close (fd);
277
278 return elf_64_header_p (&header);
279}
280
bd99dc85
PA
281static void
282delete_lwp (struct lwp_info *lwp)
283{
284 remove_thread (get_lwp_thread (lwp));
285 remove_inferior (&all_lwps, &lwp->head);
aa5ca48f 286 free (lwp->arch_private);
bd99dc85
PA
287 free (lwp);
288}
289
95954743
PA
290/* Add a process to the common process list, and set its private
291 data. */
292
293static struct process_info *
294linux_add_process (int pid, int attached)
295{
296 struct process_info *proc;
297
298 /* Is this the first process? If so, then set the arch. */
299 if (all_processes.head == NULL)
300 new_inferior = 1;
301
302 proc = add_process (pid, attached);
303 proc->private = xcalloc (1, sizeof (*proc->private));
304
aa5ca48f
DE
305 if (the_low_target.new_process != NULL)
306 proc->private->arch_private = the_low_target.new_process ();
307
95954743
PA
308 return proc;
309}
310
07d4f67e
DE
311/* Wrapper function for waitpid which handles EINTR, and emulates
312 __WALL for systems where that is not available. */
313
314static int
315my_waitpid (int pid, int *status, int flags)
316{
317 int ret, out_errno;
318
319 if (debug_threads)
320 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
321
322 if (flags & __WALL)
323 {
324 sigset_t block_mask, org_mask, wake_mask;
325 int wnohang;
326
327 wnohang = (flags & WNOHANG) != 0;
328 flags &= ~(__WALL | __WCLONE);
329 flags |= WNOHANG;
330
331 /* Block all signals while here. This avoids knowing about
332 LinuxThread's signals. */
333 sigfillset (&block_mask);
334 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
335
336 /* ... except during the sigsuspend below. */
337 sigemptyset (&wake_mask);
338
339 while (1)
340 {
341 /* Since all signals are blocked, there's no need to check
342 for EINTR here. */
343 ret = waitpid (pid, status, flags);
344 out_errno = errno;
345
346 if (ret == -1 && out_errno != ECHILD)
347 break;
348 else if (ret > 0)
349 break;
350
351 if (flags & __WCLONE)
352 {
353 /* We've tried both flavors now. If WNOHANG is set,
354 there's nothing else to do, just bail out. */
355 if (wnohang)
356 break;
357
358 if (debug_threads)
359 fprintf (stderr, "blocking\n");
360
361 /* Block waiting for signals. */
362 sigsuspend (&wake_mask);
363 }
364
365 flags ^= __WCLONE;
366 }
367
368 sigprocmask (SIG_SETMASK, &org_mask, NULL);
369 }
370 else
371 {
372 do
373 ret = waitpid (pid, status, flags);
374 while (ret == -1 && errno == EINTR);
375 out_errno = errno;
376 }
377
378 if (debug_threads)
379 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
380 pid, flags, status ? *status : -1, ret);
381
382 errno = out_errno;
383 return ret;
384}
385
bd99dc85
PA
386/* Handle a GNU/Linux extended wait response. If we see a clone
387 event, we need to add the new LWP to our list (and not report the
388 trap to higher layers). */
0d62e5e8 389
24a09b5f 390static void
54a0b537 391handle_extended_wait (struct lwp_info *event_child, int wstat)
24a09b5f
DJ
392{
393 int event = wstat >> 16;
54a0b537 394 struct lwp_info *new_lwp;
24a09b5f
DJ
395
396 if (event == PTRACE_EVENT_CLONE)
397 {
95954743 398 ptid_t ptid;
24a09b5f 399 unsigned long new_pid;
836acd6d 400 int ret, status = W_STOPCODE (SIGSTOP);
24a09b5f 401
bd99dc85 402 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
24a09b5f
DJ
403
404 /* If we haven't already seen the new PID stop, wait for it now. */
405 if (! pull_pid_from_list (&stopped_pids, new_pid))
406 {
407 /* The new child has a pending SIGSTOP. We can't affect it until it
408 hits the SIGSTOP, but we're already attached. */
409
97438e3f 410 ret = my_waitpid (new_pid, &status, __WALL);
24a09b5f
DJ
411
412 if (ret == -1)
413 perror_with_name ("waiting for new child");
414 else if (ret != new_pid)
415 warning ("wait returned unexpected PID %d", ret);
da5898ce 416 else if (!WIFSTOPPED (status))
24a09b5f
DJ
417 warning ("wait returned unexpected status 0x%x", status);
418 }
419
1e7fc18c 420 linux_enable_event_reporting (new_pid);
24a09b5f 421
95954743
PA
422 ptid = ptid_build (pid_of (event_child), new_pid, 0);
423 new_lwp = (struct lwp_info *) add_lwp (ptid);
424 add_thread (ptid, new_lwp);
24a09b5f 425
e27d73f6
DE
426 /* Either we're going to immediately resume the new thread
427 or leave it stopped. linux_resume_one_lwp is a nop if it
428 thinks the thread is currently running, so set this first
429 before calling linux_resume_one_lwp. */
430 new_lwp->stopped = 1;
431
da5898ce
DJ
432 /* Normally we will get the pending SIGSTOP. But in some cases
433 we might get another signal delivered to the group first.
f21cc1a2 434 If we do get another signal, be sure not to lose it. */
da5898ce
DJ
435 if (WSTOPSIG (status) == SIGSTOP)
436 {
d50171e4
PA
437 if (stopping_threads)
438 new_lwp->stop_pc = get_stop_pc (new_lwp);
439 else
e27d73f6 440 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
da5898ce 441 }
24a09b5f 442 else
da5898ce 443 {
54a0b537 444 new_lwp->stop_expected = 1;
d50171e4 445
da5898ce
DJ
446 if (stopping_threads)
447 {
d50171e4 448 new_lwp->stop_pc = get_stop_pc (new_lwp);
54a0b537
PA
449 new_lwp->status_pending_p = 1;
450 new_lwp->status_pending = status;
da5898ce
DJ
451 }
452 else
453 /* Pass the signal on. This is what GDB does - except
454 shouldn't we really report it instead? */
e27d73f6 455 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
da5898ce 456 }
24a09b5f
DJ
457
458 /* Always resume the current thread. If we are stopping
459 threads, it will have a pending SIGSTOP; we may as well
460 collect it now. */
2acc282a 461 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
24a09b5f
DJ
462 }
463}
464
d50171e4
PA
465/* Return the PC as read from the regcache of LWP, without any
466 adjustment. */
467
468static CORE_ADDR
469get_pc (struct lwp_info *lwp)
470{
471 struct thread_info *saved_inferior;
472 struct regcache *regcache;
473 CORE_ADDR pc;
474
475 if (the_low_target.get_pc == NULL)
476 return 0;
477
478 saved_inferior = current_inferior;
479 current_inferior = get_lwp_thread (lwp);
480
481 regcache = get_thread_regcache (current_inferior, 1);
482 pc = (*the_low_target.get_pc) (regcache);
483
484 if (debug_threads)
485 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
486
487 current_inferior = saved_inferior;
488 return pc;
489}
490
491/* This function should only be called if LWP got a SIGTRAP.
0d62e5e8
DJ
492 The SIGTRAP could mean several things.
493
494 On i386, where decr_pc_after_break is non-zero:
495 If we were single-stepping this process using PTRACE_SINGLESTEP,
496 we will get only the one SIGTRAP (even if the instruction we
497 stepped over was a breakpoint). The value of $eip will be the
498 next instruction.
499 If we continue the process using PTRACE_CONT, we will get a
500 SIGTRAP when we hit a breakpoint. The value of $eip will be
501 the instruction after the breakpoint (i.e. needs to be
502 decremented). If we report the SIGTRAP to GDB, we must also
503 report the undecremented PC. If we cancel the SIGTRAP, we
504 must resume at the decremented PC.
505
506 (Presumably, not yet tested) On a non-decr_pc_after_break machine
507 with hardware or kernel single-step:
508 If we single-step over a breakpoint instruction, our PC will
509 point at the following instruction. If we continue and hit a
510 breakpoint instruction, our PC will point at the breakpoint
511 instruction. */
512
513static CORE_ADDR
d50171e4 514get_stop_pc (struct lwp_info *lwp)
0d62e5e8 515{
d50171e4
PA
516 CORE_ADDR stop_pc;
517
518 if (the_low_target.get_pc == NULL)
519 return 0;
0d62e5e8 520
d50171e4
PA
521 stop_pc = get_pc (lwp);
522
bdabb078
PA
523 if (WSTOPSIG (lwp->last_status) == SIGTRAP
524 && !lwp->stepping
525 && !lwp->stopped_by_watchpoint
526 && lwp->last_status >> 16 == 0)
47c0c975
DE
527 stop_pc -= the_low_target.decr_pc_after_break;
528
529 if (debug_threads)
530 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
531
532 return stop_pc;
0d62e5e8 533}
ce3a066d 534
0d62e5e8 535static void *
95954743 536add_lwp (ptid_t ptid)
611cb4a5 537{
54a0b537 538 struct lwp_info *lwp;
0d62e5e8 539
54a0b537
PA
540 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
541 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 542
95954743 543 lwp->head.id = ptid;
0d62e5e8 544
aa5ca48f
DE
545 if (the_low_target.new_thread != NULL)
546 lwp->arch_private = the_low_target.new_thread ();
547
54a0b537 548 add_inferior_to_list (&all_lwps, &lwp->head);
0d62e5e8 549
54a0b537 550 return lwp;
0d62e5e8 551}
611cb4a5 552
da6d8c04
DJ
553/* Start an inferior process and returns its pid.
554 ALLARGS is a vector of program-name and args. */
555
ce3a066d
DJ
556static int
557linux_create_inferior (char *program, char **allargs)
da6d8c04 558{
a6dbe5df 559 struct lwp_info *new_lwp;
da6d8c04 560 int pid;
95954743 561 ptid_t ptid;
da6d8c04 562
42c81e2a 563#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
564 pid = vfork ();
565#else
da6d8c04 566 pid = fork ();
52fb6437 567#endif
da6d8c04
DJ
568 if (pid < 0)
569 perror_with_name ("fork");
570
571 if (pid == 0)
572 {
573 ptrace (PTRACE_TRACEME, 0, 0, 0);
574
1a981360 575#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 576 signal (__SIGRTMIN + 1, SIG_DFL);
60c3d7b0 577#endif
0d62e5e8 578
a9fa9f7d
DJ
579 setpgid (0, 0);
580
2b876972
DJ
581 execv (program, allargs);
582 if (errno == ENOENT)
583 execvp (program, allargs);
da6d8c04
DJ
584
585 fprintf (stderr, "Cannot exec %s: %s.\n", program,
d07c63e7 586 strerror (errno));
da6d8c04
DJ
587 fflush (stderr);
588 _exit (0177);
589 }
590
95954743
PA
591 linux_add_process (pid, 0);
592
593 ptid = ptid_build (pid, pid, 0);
594 new_lwp = add_lwp (ptid);
595 add_thread (ptid, new_lwp);
a6dbe5df 596 new_lwp->must_set_ptrace_flags = 1;
611cb4a5 597
a9fa9f7d 598 return pid;
da6d8c04
DJ
599}
600
601/* Attach to an inferior process. */
602
95954743
PA
603static void
604linux_attach_lwp_1 (unsigned long lwpid, int initial)
da6d8c04 605{
95954743 606 ptid_t ptid;
54a0b537 607 struct lwp_info *new_lwp;
611cb4a5 608
95954743 609 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
da6d8c04 610 {
95954743 611 if (!initial)
2d717e4f
DJ
612 {
613 /* If we fail to attach to an LWP, just warn. */
95954743 614 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
2d717e4f
DJ
615 strerror (errno), errno);
616 fflush (stderr);
617 return;
618 }
619 else
620 /* If we fail to attach to a process, report an error. */
95954743 621 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
43d5792c 622 strerror (errno), errno);
da6d8c04
DJ
623 }
624
95954743
PA
625 if (initial)
626 /* NOTE/FIXME: This lwp might have not been the tgid. */
627 ptid = ptid_build (lwpid, lwpid, 0);
628 else
629 {
630 /* Note that extracting the pid from the current inferior is
631 safe, since we're always called in the context of the same
632 process as this new thread. */
633 int pid = pid_of (get_thread_lwp (current_inferior));
634 ptid = ptid_build (pid, lwpid, 0);
635 }
24a09b5f 636
95954743
PA
637 new_lwp = (struct lwp_info *) add_lwp (ptid);
638 add_thread (ptid, new_lwp);
0d62e5e8 639
a6dbe5df
PA
640 /* We need to wait for SIGSTOP before being able to make the next
641 ptrace call on this LWP. */
642 new_lwp->must_set_ptrace_flags = 1;
643
0d62e5e8 644 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
0e21c1ec
DE
645 brings it to a halt.
646
647 There are several cases to consider here:
648
649 1) gdbserver has already attached to the process and is being notified
1b3f6016 650 of a new thread that is being created.
d50171e4
PA
651 In this case we should ignore that SIGSTOP and resume the
652 process. This is handled below by setting stop_expected = 1,
8336d594 653 and the fact that add_thread sets last_resume_kind ==
d50171e4 654 resume_continue.
0e21c1ec
DE
655
656 2) This is the first thread (the process thread), and we're attaching
1b3f6016
PA
657 to it via attach_inferior.
658 In this case we want the process thread to stop.
d50171e4
PA
659 This is handled by having linux_attach set last_resume_kind ==
660 resume_stop after we return.
1b3f6016
PA
661 ??? If the process already has several threads we leave the other
662 threads running.
0e21c1ec
DE
663
664 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1b3f6016
PA
665 existing threads.
666 In this case we want the thread to stop.
667 FIXME: This case is currently not properly handled.
668 We should wait for the SIGSTOP but don't. Things work apparently
669 because enough time passes between when we ptrace (ATTACH) and when
670 gdb makes the next ptrace call on the thread.
0d62e5e8
DJ
671
672 On the other hand, if we are currently trying to stop all threads, we
673 should treat the new thread as if we had sent it a SIGSTOP. This works
54a0b537 674 because we are guaranteed that the add_lwp call above added us to the
0e21c1ec
DE
675 end of the list, and so the new thread has not yet reached
676 wait_for_sigstop (but will). */
d50171e4 677 new_lwp->stop_expected = 1;
0d62e5e8
DJ
678}
679
95954743
PA
680void
681linux_attach_lwp (unsigned long lwpid)
682{
683 linux_attach_lwp_1 (lwpid, 0);
684}
685
0d62e5e8 686int
a1928bad 687linux_attach (unsigned long pid)
0d62e5e8 688{
95954743 689 linux_attach_lwp_1 (pid, 1);
95954743 690 linux_add_process (pid, 1);
0d62e5e8 691
bd99dc85
PA
692 if (!non_stop)
693 {
8336d594
PA
694 struct thread_info *thread;
695
696 /* Don't ignore the initial SIGSTOP if we just attached to this
697 process. It will be collected by wait shortly. */
698 thread = find_thread_ptid (ptid_build (pid, pid, 0));
699 thread->last_resume_kind = resume_stop;
bd99dc85 700 }
0d62e5e8 701
95954743
PA
702 return 0;
703}
704
705struct counter
706{
707 int pid;
708 int count;
709};
710
711static int
712second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
713{
714 struct counter *counter = args;
715
716 if (ptid_get_pid (entry->id) == counter->pid)
717 {
718 if (++counter->count > 1)
719 return 1;
720 }
d61ddec4 721
da6d8c04
DJ
722 return 0;
723}
724
95954743
PA
725static int
726last_thread_of_process_p (struct thread_info *thread)
727{
728 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
729 int pid = ptid_get_pid (ptid);
730 struct counter counter = { pid , 0 };
da6d8c04 731
95954743
PA
732 return (find_inferior (&all_threads,
733 second_thread_of_pid_p, &counter) == NULL);
734}
735
736/* Kill the inferior lwp. */
737
738static int
739linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
da6d8c04 740{
0d62e5e8 741 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 742 struct lwp_info *lwp = get_thread_lwp (thread);
0d62e5e8 743 int wstat;
95954743
PA
744 int pid = * (int *) args;
745
746 if (ptid_get_pid (entry->id) != pid)
747 return 0;
0d62e5e8 748
fd500816
DJ
749 /* We avoid killing the first thread here, because of a Linux kernel (at
750 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
751 the children get a chance to be reaped, it will remain a zombie
752 forever. */
95954743 753
12b42a12 754 if (lwpid_of (lwp) == pid)
95954743
PA
755 {
756 if (debug_threads)
757 fprintf (stderr, "lkop: is last of process %s\n",
758 target_pid_to_str (entry->id));
759 return 0;
760 }
fd500816 761
0d62e5e8
DJ
762 do
763 {
bd99dc85 764 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
0d62e5e8
DJ
765
766 /* Make sure it died. The loop is most likely unnecessary. */
95954743 767 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
bd99dc85 768 } while (pid > 0 && WIFSTOPPED (wstat));
95954743
PA
769
770 return 0;
da6d8c04
DJ
771}
772
95954743
PA
773static int
774linux_kill (int pid)
0d62e5e8 775{
95954743 776 struct process_info *process;
54a0b537 777 struct lwp_info *lwp;
95954743 778 struct thread_info *thread;
fd500816 779 int wstat;
95954743 780 int lwpid;
fd500816 781
95954743
PA
782 process = find_process_pid (pid);
783 if (process == NULL)
784 return -1;
9d606399 785
f9e39928
PA
786 /* If we're killing a running inferior, make sure it is stopped
787 first, as PTRACE_KILL will not work otherwise. */
7984d532 788 stop_all_lwps (0, NULL);
f9e39928 789
95954743 790 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
fd500816 791
54a0b537 792 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 793 thread in the list, so do so now. */
95954743
PA
794 lwp = find_lwp_pid (pid_to_ptid (pid));
795 thread = get_lwp_thread (lwp);
bd99dc85
PA
796
797 if (debug_threads)
95954743
PA
798 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
799 lwpid_of (lwp), pid);
bd99dc85 800
fd500816
DJ
801 do
802 {
bd99dc85 803 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
fd500816
DJ
804
805 /* Make sure it died. The loop is most likely unnecessary. */
95954743
PA
806 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
807 } while (lwpid > 0 && WIFSTOPPED (wstat));
2d717e4f 808
8336d594 809 the_target->mourn (process);
f9e39928
PA
810
811 /* Since we presently can only stop all lwps of all processes, we
812 need to unstop lwps of other processes. */
7984d532 813 unstop_all_lwps (0, NULL);
95954743 814 return 0;
0d62e5e8
DJ
815}
816
95954743
PA
817static int
818linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
819{
820 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 821 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
822 int pid = * (int *) args;
823
824 if (ptid_get_pid (entry->id) != pid)
825 return 0;
6ad8ae5c 826
ae13219e
DJ
827 /* If this process is stopped but is expecting a SIGSTOP, then make
828 sure we take care of that now. This isn't absolutely guaranteed
829 to collect the SIGSTOP, but is fairly likely to. */
54a0b537 830 if (lwp->stop_expected)
ae13219e 831 {
bd99dc85 832 int wstat;
ae13219e 833 /* Clear stop_expected, so that the SIGSTOP will be reported. */
54a0b537 834 lwp->stop_expected = 0;
f9e39928 835 linux_resume_one_lwp (lwp, 0, 0, NULL);
95954743 836 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
ae13219e
DJ
837 }
838
839 /* Flush any pending changes to the process's registers. */
840 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 841 get_lwp_thread (lwp));
ae13219e
DJ
842
843 /* Finally, let it resume. */
bd99dc85
PA
844 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
845
846 delete_lwp (lwp);
95954743 847 return 0;
6ad8ae5c
DJ
848}
849
95954743
PA
850static int
851linux_detach (int pid)
852{
853 struct process_info *process;
854
855 process = find_process_pid (pid);
856 if (process == NULL)
857 return -1;
858
f9e39928
PA
859 /* Stop all threads before detaching. First, ptrace requires that
860 the thread is stopped to sucessfully detach. Second, thread_db
861 may need to uninstall thread event breakpoints from memory, which
862 only works with a stopped process anyway. */
7984d532 863 stop_all_lwps (0, NULL);
f9e39928 864
ca5c370d 865#ifdef USE_THREAD_DB
8336d594 866 thread_db_detach (process);
ca5c370d
PA
867#endif
868
fa593d66
PA
869 /* Stabilize threads (move out of jump pads). */
870 stabilize_threads ();
871
95954743 872 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
873
874 the_target->mourn (process);
f9e39928
PA
875
876 /* Since we presently can only stop all lwps of all processes, we
877 need to unstop lwps of other processes. */
7984d532 878 unstop_all_lwps (0, NULL);
f9e39928
PA
879 return 0;
880}
881
882/* Remove all LWPs that belong to process PROC from the lwp list. */
883
884static int
885delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
886{
887 struct lwp_info *lwp = (struct lwp_info *) entry;
888 struct process_info *process = proc;
889
890 if (pid_of (lwp) == pid_of (process))
891 delete_lwp (lwp);
892
dd6953e1 893 return 0;
6ad8ae5c
DJ
894}
895
8336d594
PA
896static void
897linux_mourn (struct process_info *process)
898{
899 struct process_info_private *priv;
900
901#ifdef USE_THREAD_DB
902 thread_db_mourn (process);
903#endif
904
f9e39928
PA
905 find_inferior (&all_lwps, delete_lwp_callback, process);
906
8336d594
PA
907 /* Freeing all private data. */
908 priv = process->private;
909 free (priv->arch_private);
910 free (priv);
911 process->private = NULL;
505106cd
PA
912
913 remove_process (process);
8336d594
PA
914}
915
444d6139 916static void
95954743 917linux_join (int pid)
444d6139 918{
444d6139 919 int status, ret;
95954743 920 struct process_info *process;
bd99dc85 921
95954743
PA
922 process = find_process_pid (pid);
923 if (process == NULL)
924 return;
444d6139
PA
925
926 do {
95954743 927 ret = my_waitpid (pid, &status, 0);
444d6139
PA
928 if (WIFEXITED (status) || WIFSIGNALED (status))
929 break;
930 } while (ret != -1 || errno != ECHILD);
931}
932
6ad8ae5c 933/* Return nonzero if the given thread is still alive. */
0d62e5e8 934static int
95954743 935linux_thread_alive (ptid_t ptid)
0d62e5e8 936{
95954743
PA
937 struct lwp_info *lwp = find_lwp_pid (ptid);
938
939 /* We assume we always know if a thread exits. If a whole process
940 exited but we still haven't been able to report it to GDB, we'll
941 hold on to the last lwp of the dead process. */
942 if (lwp != NULL)
943 return !lwp->dead;
0d62e5e8
DJ
944 else
945 return 0;
946}
947
6bf5e0ba 948/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 949static int
d50171e4 950status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 951{
54a0b537 952 struct lwp_info *lwp = (struct lwp_info *) entry;
95954743 953 ptid_t ptid = * (ptid_t *) arg;
7984d532 954 struct thread_info *thread;
95954743
PA
955
956 /* Check if we're only interested in events from a specific process
957 or its lwps. */
958 if (!ptid_equal (minus_one_ptid, ptid)
959 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
960 return 0;
0d62e5e8 961
d50171e4
PA
962 thread = get_lwp_thread (lwp);
963
964 /* If we got a `vCont;t', but we haven't reported a stop yet, do
965 report any status pending the LWP may have. */
8336d594 966 if (thread->last_resume_kind == resume_stop
7984d532 967 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4 968 return 0;
0d62e5e8 969
d50171e4 970 return lwp->status_pending_p;
0d62e5e8
DJ
971}
972
95954743
PA
973static int
974same_lwp (struct inferior_list_entry *entry, void *data)
975{
976 ptid_t ptid = *(ptid_t *) data;
977 int lwp;
978
979 if (ptid_get_lwp (ptid) != 0)
980 lwp = ptid_get_lwp (ptid);
981 else
982 lwp = ptid_get_pid (ptid);
983
984 if (ptid_get_lwp (entry->id) == lwp)
985 return 1;
986
987 return 0;
988}
989
990struct lwp_info *
991find_lwp_pid (ptid_t ptid)
992{
993 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
994}
995
bd99dc85 996static struct lwp_info *
95954743 997linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
611cb4a5 998{
0d62e5e8 999 int ret;
95954743 1000 int to_wait_for = -1;
bd99dc85 1001 struct lwp_info *child = NULL;
0d62e5e8 1002
bd99dc85 1003 if (debug_threads)
95954743
PA
1004 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1005
1006 if (ptid_equal (ptid, minus_one_ptid))
1007 to_wait_for = -1; /* any child */
1008 else
1009 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
0d62e5e8 1010
bd99dc85 1011 options |= __WALL;
0d62e5e8 1012
bd99dc85 1013retry:
0d62e5e8 1014
bd99dc85
PA
1015 ret = my_waitpid (to_wait_for, wstatp, options);
1016 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1017 return NULL;
1018 else if (ret == -1)
1019 perror_with_name ("waitpid");
0d62e5e8
DJ
1020
1021 if (debug_threads
1022 && (!WIFSTOPPED (*wstatp)
1023 || (WSTOPSIG (*wstatp) != 32
1024 && WSTOPSIG (*wstatp) != 33)))
1025 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1026
95954743 1027 child = find_lwp_pid (pid_to_ptid (ret));
0d62e5e8 1028
24a09b5f
DJ
1029 /* If we didn't find a process, one of two things presumably happened:
1030 - A process we started and then detached from has exited. Ignore it.
1031 - A process we are controlling has forked and the new child's stop
1032 was reported to us by the kernel. Save its PID. */
bd99dc85 1033 if (child == NULL && WIFSTOPPED (*wstatp))
24a09b5f
DJ
1034 {
1035 add_pid_to_list (&stopped_pids, ret);
1036 goto retry;
1037 }
bd99dc85 1038 else if (child == NULL)
24a09b5f
DJ
1039 goto retry;
1040
bd99dc85 1041 child->stopped = 1;
0d62e5e8 1042
bd99dc85 1043 child->last_status = *wstatp;
32ca6d61 1044
d61ddec4
UW
1045 /* Architecture-specific setup after inferior is running.
1046 This needs to happen after we have attached to the inferior
1047 and it is stopped for the first time, but before we access
1048 any inferior registers. */
1049 if (new_inferior)
1050 {
1051 the_low_target.arch_setup ();
52fa2412
UW
1052#ifdef HAVE_LINUX_REGSETS
1053 memset (disabled_regsets, 0, num_regsets);
1054#endif
d61ddec4
UW
1055 new_inferior = 0;
1056 }
1057
c3adc08c
PA
1058 /* Fetch the possibly triggered data watchpoint info and store it in
1059 CHILD.
1060
1061 On some archs, like x86, that use debug registers to set
1062 watchpoints, it's possible that the way to know which watched
1063 address trapped, is to check the register that is used to select
1064 which address to watch. Problem is, between setting the
1065 watchpoint and reading back which data address trapped, the user
1066 may change the set of watchpoints, and, as a consequence, GDB
1067 changes the debug registers in the inferior. To avoid reading
1068 back a stale stopped-data-address when that happens, we cache in
1069 LP the fact that a watchpoint trapped, and the corresponding data
1070 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1071 changes the debug registers meanwhile, we have the cached data we
1072 can rely on. */
1073
1074 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1075 {
1076 if (the_low_target.stopped_by_watchpoint == NULL)
1077 {
1078 child->stopped_by_watchpoint = 0;
1079 }
1080 else
1081 {
1082 struct thread_info *saved_inferior;
1083
1084 saved_inferior = current_inferior;
1085 current_inferior = get_lwp_thread (child);
1086
1087 child->stopped_by_watchpoint
1088 = the_low_target.stopped_by_watchpoint ();
1089
1090 if (child->stopped_by_watchpoint)
1091 {
1092 if (the_low_target.stopped_data_address != NULL)
1093 child->stopped_data_address
1094 = the_low_target.stopped_data_address ();
1095 else
1096 child->stopped_data_address = 0;
1097 }
1098
1099 current_inferior = saved_inferior;
1100 }
1101 }
1102
d50171e4
PA
1103 /* Store the STOP_PC, with adjustment applied. This depends on the
1104 architecture being defined already (so that CHILD has a valid
1105 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1106 not). */
1107 if (WIFSTOPPED (*wstatp))
1108 child->stop_pc = get_stop_pc (child);
1109
0d62e5e8 1110 if (debug_threads
47c0c975
DE
1111 && WIFSTOPPED (*wstatp)
1112 && the_low_target.get_pc != NULL)
0d62e5e8 1113 {
896c7fbb 1114 struct thread_info *saved_inferior = current_inferior;
bce522a2 1115 struct regcache *regcache;
47c0c975
DE
1116 CORE_ADDR pc;
1117
d50171e4 1118 current_inferior = get_lwp_thread (child);
bce522a2 1119 regcache = get_thread_regcache (current_inferior, 1);
442ea881 1120 pc = (*the_low_target.get_pc) (regcache);
47c0c975 1121 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
896c7fbb 1122 current_inferior = saved_inferior;
0d62e5e8 1123 }
bd99dc85
PA
1124
1125 return child;
0d62e5e8 1126}
611cb4a5 1127
219f2f23
PA
1128/* This function should only be called if the LWP got a SIGTRAP.
1129
1130 Handle any tracepoint steps or hits. Return true if a tracepoint
1131 event was handled, 0 otherwise. */
1132
1133static int
1134handle_tracepoints (struct lwp_info *lwp)
1135{
1136 struct thread_info *tinfo = get_lwp_thread (lwp);
1137 int tpoint_related_event = 0;
1138
7984d532
PA
1139 /* If this tracepoint hit causes a tracing stop, we'll immediately
1140 uninsert tracepoints. To do this, we temporarily pause all
1141 threads, unpatch away, and then unpause threads. We need to make
1142 sure the unpausing doesn't resume LWP too. */
1143 lwp->suspended++;
1144
219f2f23
PA
1145 /* And we need to be sure that any all-threads-stopping doesn't try
1146 to move threads out of the jump pads, as it could deadlock the
1147 inferior (LWP could be in the jump pad, maybe even holding the
1148 lock.) */
1149
1150 /* Do any necessary step collect actions. */
1151 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1152
fa593d66
PA
1153 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1154
219f2f23
PA
1155 /* See if we just hit a tracepoint and do its main collect
1156 actions. */
1157 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1158
7984d532
PA
1159 lwp->suspended--;
1160
1161 gdb_assert (lwp->suspended == 0);
fa593d66 1162 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1163
219f2f23
PA
1164 if (tpoint_related_event)
1165 {
1166 if (debug_threads)
1167 fprintf (stderr, "got a tracepoint event\n");
1168 return 1;
1169 }
1170
1171 return 0;
1172}
1173
fa593d66
PA
1174/* Convenience wrapper. Returns true if LWP is presently collecting a
1175 fast tracepoint. */
1176
1177static int
1178linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1179 struct fast_tpoint_collect_status *status)
1180{
1181 CORE_ADDR thread_area;
1182
1183 if (the_low_target.get_thread_area == NULL)
1184 return 0;
1185
1186 /* Get the thread area address. This is used to recognize which
1187 thread is which when tracing with the in-process agent library.
1188 We don't read anything from the address, and treat it as opaque;
1189 it's the address itself that we assume is unique per-thread. */
1190 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1191 return 0;
1192
1193 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1194}
1195
1196/* The reason we resume in the caller, is because we want to be able
1197 to pass lwp->status_pending as WSTAT, and we need to clear
1198 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1199 refuses to resume. */
1200
1201static int
1202maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1203{
1204 struct thread_info *saved_inferior;
1205
1206 saved_inferior = current_inferior;
1207 current_inferior = get_lwp_thread (lwp);
1208
1209 if ((wstat == NULL
1210 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1211 && supports_fast_tracepoints ()
1212 && in_process_agent_loaded ())
1213 {
1214 struct fast_tpoint_collect_status status;
1215 int r;
1216
1217 if (debug_threads)
1218 fprintf (stderr, "\
1219Checking whether LWP %ld needs to move out of the jump pad.\n",
1220 lwpid_of (lwp));
1221
1222 r = linux_fast_tracepoint_collecting (lwp, &status);
1223
1224 if (wstat == NULL
1225 || (WSTOPSIG (*wstat) != SIGILL
1226 && WSTOPSIG (*wstat) != SIGFPE
1227 && WSTOPSIG (*wstat) != SIGSEGV
1228 && WSTOPSIG (*wstat) != SIGBUS))
1229 {
1230 lwp->collecting_fast_tracepoint = r;
1231
1232 if (r != 0)
1233 {
1234 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1235 {
1236 /* Haven't executed the original instruction yet.
1237 Set breakpoint there, and wait till it's hit,
1238 then single-step until exiting the jump pad. */
1239 lwp->exit_jump_pad_bkpt
1240 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1241 }
1242
1243 if (debug_threads)
1244 fprintf (stderr, "\
1245Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1246 lwpid_of (lwp));
1247
1248 return 1;
1249 }
1250 }
1251 else
1252 {
1253 /* If we get a synchronous signal while collecting, *and*
1254 while executing the (relocated) original instruction,
1255 reset the PC to point at the tpoint address, before
1256 reporting to GDB. Otherwise, it's an IPA lib bug: just
1257 report the signal to GDB, and pray for the best. */
1258
1259 lwp->collecting_fast_tracepoint = 0;
1260
1261 if (r != 0
1262 && (status.adjusted_insn_addr <= lwp->stop_pc
1263 && lwp->stop_pc < status.adjusted_insn_addr_end))
1264 {
1265 siginfo_t info;
1266 struct regcache *regcache;
1267
1268 /* The si_addr on a few signals references the address
1269 of the faulting instruction. Adjust that as
1270 well. */
1271 if ((WSTOPSIG (*wstat) == SIGILL
1272 || WSTOPSIG (*wstat) == SIGFPE
1273 || WSTOPSIG (*wstat) == SIGBUS
1274 || WSTOPSIG (*wstat) == SIGSEGV)
1275 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
1276 /* Final check just to make sure we don't clobber
1277 the siginfo of non-kernel-sent signals. */
1278 && (uintptr_t) info.si_addr == lwp->stop_pc)
1279 {
1280 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1281 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
1282 }
1283
1284 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1285 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1286 lwp->stop_pc = status.tpoint_addr;
1287
1288 /* Cancel any fast tracepoint lock this thread was
1289 holding. */
1290 force_unlock_trace_buffer ();
1291 }
1292
1293 if (lwp->exit_jump_pad_bkpt != NULL)
1294 {
1295 if (debug_threads)
1296 fprintf (stderr,
1297 "Cancelling fast exit-jump-pad: removing bkpt. "
1298 "stopping all threads momentarily.\n");
1299
1300 stop_all_lwps (1, lwp);
1301 cancel_breakpoints ();
1302
1303 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1304 lwp->exit_jump_pad_bkpt = NULL;
1305
1306 unstop_all_lwps (1, lwp);
1307
1308 gdb_assert (lwp->suspended >= 0);
1309 }
1310 }
1311 }
1312
1313 if (debug_threads)
1314 fprintf (stderr, "\
1315Checking whether LWP %ld needs to move out of the jump pad...no\n",
1316 lwpid_of (lwp));
1317 return 0;
1318}
1319
1320/* Enqueue one signal in the "signals to report later when out of the
1321 jump pad" list. */
1322
1323static void
1324enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1325{
1326 struct pending_signals *p_sig;
1327
1328 if (debug_threads)
1329 fprintf (stderr, "\
1330Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1331
1332 if (debug_threads)
1333 {
1334 struct pending_signals *sig;
1335
1336 for (sig = lwp->pending_signals_to_report;
1337 sig != NULL;
1338 sig = sig->prev)
1339 fprintf (stderr,
1340 " Already queued %d\n",
1341 sig->signal);
1342
1343 fprintf (stderr, " (no more currently queued signals)\n");
1344 }
1345
1a981360
PA
1346 /* Don't enqueue non-RT signals if they are already in the deferred
1347 queue. (SIGSTOP being the easiest signal to see ending up here
1348 twice) */
1349 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1350 {
1351 struct pending_signals *sig;
1352
1353 for (sig = lwp->pending_signals_to_report;
1354 sig != NULL;
1355 sig = sig->prev)
1356 {
1357 if (sig->signal == WSTOPSIG (*wstat))
1358 {
1359 if (debug_threads)
1360 fprintf (stderr,
1361 "Not requeuing already queued non-RT signal %d"
1362 " for LWP %ld\n",
1363 sig->signal,
1364 lwpid_of (lwp));
1365 return;
1366 }
1367 }
1368 }
1369
fa593d66
PA
1370 p_sig = xmalloc (sizeof (*p_sig));
1371 p_sig->prev = lwp->pending_signals_to_report;
1372 p_sig->signal = WSTOPSIG (*wstat);
1373 memset (&p_sig->info, 0, sizeof (siginfo_t));
1374 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1375
1376 lwp->pending_signals_to_report = p_sig;
1377}
1378
1379/* Dequeue one signal from the "signals to report later when out of
1380 the jump pad" list. */
1381
1382static int
1383dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1384{
1385 if (lwp->pending_signals_to_report != NULL)
1386 {
1387 struct pending_signals **p_sig;
1388
1389 p_sig = &lwp->pending_signals_to_report;
1390 while ((*p_sig)->prev != NULL)
1391 p_sig = &(*p_sig)->prev;
1392
1393 *wstat = W_STOPCODE ((*p_sig)->signal);
1394 if ((*p_sig)->info.si_signo != 0)
1395 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1396 free (*p_sig);
1397 *p_sig = NULL;
1398
1399 if (debug_threads)
1400 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1401 WSTOPSIG (*wstat), lwpid_of (lwp));
1402
1403 if (debug_threads)
1404 {
1405 struct pending_signals *sig;
1406
1407 for (sig = lwp->pending_signals_to_report;
1408 sig != NULL;
1409 sig = sig->prev)
1410 fprintf (stderr,
1411 " Still queued %d\n",
1412 sig->signal);
1413
1414 fprintf (stderr, " (no more queued signals)\n");
1415 }
1416
1417 return 1;
1418 }
1419
1420 return 0;
1421}
1422
d50171e4
PA
1423/* Arrange for a breakpoint to be hit again later. We don't keep the
1424 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1425 will handle the current event, eventually we will resume this LWP,
1426 and this breakpoint will trap again. */
1427
1428static int
1429cancel_breakpoint (struct lwp_info *lwp)
1430{
1431 struct thread_info *saved_inferior;
d50171e4
PA
1432
1433 /* There's nothing to do if we don't support breakpoints. */
1434 if (!supports_breakpoints ())
1435 return 0;
1436
d50171e4
PA
1437 /* breakpoint_at reads from current inferior. */
1438 saved_inferior = current_inferior;
1439 current_inferior = get_lwp_thread (lwp);
1440
1441 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1442 {
1443 if (debug_threads)
1444 fprintf (stderr,
1445 "CB: Push back breakpoint for %s\n",
fc7238bb 1446 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1447
1448 /* Back up the PC if necessary. */
1449 if (the_low_target.decr_pc_after_break)
1450 {
1451 struct regcache *regcache
fc7238bb 1452 = get_thread_regcache (current_inferior, 1);
d50171e4
PA
1453 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1454 }
1455
1456 current_inferior = saved_inferior;
1457 return 1;
1458 }
1459 else
1460 {
1461 if (debug_threads)
1462 fprintf (stderr,
1463 "CB: No breakpoint found at %s for [%s]\n",
1464 paddress (lwp->stop_pc),
fc7238bb 1465 target_pid_to_str (ptid_of (lwp)));
d50171e4
PA
1466 }
1467
1468 current_inferior = saved_inferior;
1469 return 0;
1470}
1471
1472/* When the event-loop is doing a step-over, this points at the thread
1473 being stepped. */
1474ptid_t step_over_bkpt;
1475
bd99dc85
PA
1476/* Wait for an event from child PID. If PID is -1, wait for any
1477 child. Store the stop status through the status pointer WSTAT.
1478 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1479 event was found and OPTIONS contains WNOHANG. Return the PID of
1480 the stopped child otherwise. */
1481
0d62e5e8 1482static int
95954743 1483linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
0d62e5e8 1484{
d50171e4
PA
1485 struct lwp_info *event_child, *requested_child;
1486
d50171e4
PA
1487 event_child = NULL;
1488 requested_child = NULL;
0d62e5e8 1489
95954743 1490 /* Check for a lwp with a pending status. */
bd99dc85 1491
95954743
PA
1492 if (ptid_equal (ptid, minus_one_ptid)
1493 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
0d62e5e8 1494 {
54a0b537 1495 event_child = (struct lwp_info *)
d50171e4 1496 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
0d62e5e8 1497 if (debug_threads && event_child)
bd99dc85 1498 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
0d62e5e8
DJ
1499 }
1500 else
1501 {
95954743 1502 requested_child = find_lwp_pid (ptid);
d50171e4 1503
fa593d66
PA
1504 if (!stopping_threads
1505 && requested_child->status_pending_p
1506 && requested_child->collecting_fast_tracepoint)
1507 {
1508 enqueue_one_deferred_signal (requested_child,
1509 &requested_child->status_pending);
1510 requested_child->status_pending_p = 0;
1511 requested_child->status_pending = 0;
1512 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1513 }
1514
1515 if (requested_child->suspended
1516 && requested_child->status_pending_p)
1517 fatal ("requesting an event out of a suspended child?");
1518
d50171e4 1519 if (requested_child->status_pending_p)
bd99dc85 1520 event_child = requested_child;
0d62e5e8 1521 }
611cb4a5 1522
0d62e5e8
DJ
1523 if (event_child != NULL)
1524 {
bd99dc85
PA
1525 if (debug_threads)
1526 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1527 lwpid_of (event_child), event_child->status_pending);
1528 *wstat = event_child->status_pending;
1529 event_child->status_pending_p = 0;
1530 event_child->status_pending = 0;
1531 current_inferior = get_lwp_thread (event_child);
1532 return lwpid_of (event_child);
0d62e5e8
DJ
1533 }
1534
1535 /* We only enter this loop if no process has a pending wait status. Thus
1536 any action taken in response to a wait status inside this loop is
1537 responding as soon as we detect the status, not after any pending
1538 events. */
1539 while (1)
1540 {
6bf5e0ba 1541 event_child = linux_wait_for_lwp (ptid, wstat, options);
0d62e5e8 1542
bd99dc85 1543 if ((options & WNOHANG) && event_child == NULL)
d50171e4
PA
1544 {
1545 if (debug_threads)
1546 fprintf (stderr, "WNOHANG set, no event found\n");
1547 return 0;
1548 }
0d62e5e8
DJ
1549
1550 if (event_child == NULL)
1551 error ("event from unknown child");
611cb4a5 1552
bd99dc85 1553 current_inferior = get_lwp_thread (event_child);
0d62e5e8 1554
89be2091 1555 /* Check for thread exit. */
bd99dc85 1556 if (! WIFSTOPPED (*wstat))
0d62e5e8 1557 {
89be2091 1558 if (debug_threads)
95954743 1559 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
89be2091
DJ
1560
1561 /* If the last thread is exiting, just return. */
95954743 1562 if (last_thread_of_process_p (current_inferior))
bd99dc85
PA
1563 {
1564 if (debug_threads)
95954743
PA
1565 fprintf (stderr, "LWP %ld is last lwp of process\n",
1566 lwpid_of (event_child));
bd99dc85
PA
1567 return lwpid_of (event_child);
1568 }
89be2091 1569
bd99dc85
PA
1570 if (!non_stop)
1571 {
1572 current_inferior = (struct thread_info *) all_threads.head;
1573 if (debug_threads)
1574 fprintf (stderr, "Current inferior is now %ld\n",
1575 lwpid_of (get_thread_lwp (current_inferior)));
1576 }
1577 else
1578 {
1579 current_inferior = NULL;
1580 if (debug_threads)
1581 fprintf (stderr, "Current inferior is now <NULL>\n");
1582 }
89be2091
DJ
1583
1584 /* If we were waiting for this particular child to do something...
1585 well, it did something. */
bd99dc85 1586 if (requested_child != NULL)
d50171e4
PA
1587 {
1588 int lwpid = lwpid_of (event_child);
1589
1590 /* Cancel the step-over operation --- the thread that
1591 started it is gone. */
1592 if (finish_step_over (event_child))
7984d532 1593 unstop_all_lwps (1, event_child);
d50171e4
PA
1594 delete_lwp (event_child);
1595 return lwpid;
1596 }
1597
1598 delete_lwp (event_child);
89be2091
DJ
1599
1600 /* Wait for a more interesting event. */
1601 continue;
1602 }
1603
a6dbe5df
PA
1604 if (event_child->must_set_ptrace_flags)
1605 {
1e7fc18c 1606 linux_enable_event_reporting (lwpid_of (event_child));
a6dbe5df
PA
1607 event_child->must_set_ptrace_flags = 0;
1608 }
1609
bd99dc85
PA
1610 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1611 && *wstat >> 16 != 0)
24a09b5f 1612 {
bd99dc85 1613 handle_extended_wait (event_child, *wstat);
24a09b5f
DJ
1614 continue;
1615 }
1616
d50171e4
PA
1617 if (WIFSTOPPED (*wstat)
1618 && WSTOPSIG (*wstat) == SIGSTOP
1619 && event_child->stop_expected)
1620 {
1621 int should_stop;
1622
1623 if (debug_threads)
1624 fprintf (stderr, "Expected stop.\n");
1625 event_child->stop_expected = 0;
1626
8336d594 1627 should_stop = (current_inferior->last_resume_kind == resume_stop
d50171e4
PA
1628 || stopping_threads);
1629
1630 if (!should_stop)
1631 {
1632 linux_resume_one_lwp (event_child,
1633 event_child->stepping, 0, NULL);
1634 continue;
1635 }
1636 }
1637
bd99dc85 1638 return lwpid_of (event_child);
611cb4a5 1639 }
0d62e5e8 1640
611cb4a5
DJ
1641 /* NOTREACHED */
1642 return 0;
1643}
1644
95954743
PA
1645static int
1646linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1647{
1648 ptid_t wait_ptid;
1649
1650 if (ptid_is_pid (ptid))
1651 {
1652 /* A request to wait for a specific tgid. This is not possible
1653 with waitpid, so instead, we wait for any child, and leave
1654 children we're not interested in right now with a pending
1655 status to report later. */
1656 wait_ptid = minus_one_ptid;
1657 }
1658 else
1659 wait_ptid = ptid;
1660
1661 while (1)
1662 {
1663 int event_pid;
1664
1665 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1666
1667 if (event_pid > 0
1668 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1669 {
1670 struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1671
1672 if (! WIFSTOPPED (*wstat))
1673 mark_lwp_dead (event_child, *wstat);
1674 else
1675 {
1676 event_child->status_pending_p = 1;
1677 event_child->status_pending = *wstat;
1678 }
1679 }
1680 else
1681 return event_pid;
1682 }
1683}
1684
6bf5e0ba
PA
1685
1686/* Count the LWP's that have had events. */
1687
1688static int
1689count_events_callback (struct inferior_list_entry *entry, void *data)
1690{
1691 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1692 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1693 int *count = data;
1694
1695 gdb_assert (count != NULL);
1696
1697 /* Count only resumed LWPs that have a SIGTRAP event pending that
1698 should be reported to GDB. */
8336d594
PA
1699 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1700 && thread->last_resume_kind != resume_stop
6bf5e0ba
PA
1701 && lp->status_pending_p
1702 && WIFSTOPPED (lp->status_pending)
1703 && WSTOPSIG (lp->status_pending) == SIGTRAP
1704 && !breakpoint_inserted_here (lp->stop_pc))
1705 (*count)++;
1706
1707 return 0;
1708}
1709
1710/* Select the LWP (if any) that is currently being single-stepped. */
1711
1712static int
1713select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1714{
1715 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1716 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba 1717
8336d594
PA
1718 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1719 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
1720 && lp->status_pending_p)
1721 return 1;
1722 else
1723 return 0;
1724}
1725
1726/* Select the Nth LWP that has had a SIGTRAP event that should be
1727 reported to GDB. */
1728
1729static int
1730select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1731{
1732 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1733 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1734 int *selector = data;
1735
1736 gdb_assert (selector != NULL);
1737
1738 /* Select only resumed LWPs that have a SIGTRAP event pending. */
8336d594
PA
1739 if (thread->last_resume_kind != resume_stop
1740 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1741 && lp->status_pending_p
1742 && WIFSTOPPED (lp->status_pending)
1743 && WSTOPSIG (lp->status_pending) == SIGTRAP
1744 && !breakpoint_inserted_here (lp->stop_pc))
1745 if ((*selector)-- == 0)
1746 return 1;
1747
1748 return 0;
1749}
1750
1751static int
1752cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1753{
1754 struct lwp_info *lp = (struct lwp_info *) entry;
8336d594 1755 struct thread_info *thread = get_lwp_thread (lp);
6bf5e0ba
PA
1756 struct lwp_info *event_lp = data;
1757
1758 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1759 if (lp == event_lp)
1760 return 0;
1761
1762 /* If a LWP other than the LWP that we're reporting an event for has
1763 hit a GDB breakpoint (as opposed to some random trap signal),
1764 then just arrange for it to hit it again later. We don't keep
1765 the SIGTRAP status and don't forward the SIGTRAP signal to the
1766 LWP. We will handle the current event, eventually we will resume
1767 all LWPs, and this one will get its breakpoint trap again.
1768
1769 If we do not do this, then we run the risk that the user will
1770 delete or disable the breakpoint, but the LWP will have already
1771 tripped on it. */
1772
8336d594
PA
1773 if (thread->last_resume_kind != resume_stop
1774 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
6bf5e0ba
PA
1775 && lp->status_pending_p
1776 && WIFSTOPPED (lp->status_pending)
1777 && WSTOPSIG (lp->status_pending) == SIGTRAP
bdabb078
PA
1778 && !lp->stepping
1779 && !lp->stopped_by_watchpoint
6bf5e0ba
PA
1780 && cancel_breakpoint (lp))
1781 /* Throw away the SIGTRAP. */
1782 lp->status_pending_p = 0;
1783
1784 return 0;
1785}
1786
7984d532
PA
1787static void
1788linux_cancel_breakpoints (void)
1789{
1790 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1791}
1792
6bf5e0ba
PA
1793/* Select one LWP out of those that have events pending. */
1794
1795static void
1796select_event_lwp (struct lwp_info **orig_lp)
1797{
1798 int num_events = 0;
1799 int random_selector;
1800 struct lwp_info *event_lp;
1801
1802 /* Give preference to any LWP that is being single-stepped. */
1803 event_lp
1804 = (struct lwp_info *) find_inferior (&all_lwps,
1805 select_singlestep_lwp_callback, NULL);
1806 if (event_lp != NULL)
1807 {
1808 if (debug_threads)
1809 fprintf (stderr,
1810 "SEL: Select single-step %s\n",
1811 target_pid_to_str (ptid_of (event_lp)));
1812 }
1813 else
1814 {
1815 /* No single-stepping LWP. Select one at random, out of those
1816 which have had SIGTRAP events. */
1817
1818 /* First see how many SIGTRAP events we have. */
1819 find_inferior (&all_lwps, count_events_callback, &num_events);
1820
1821 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1822 random_selector = (int)
1823 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1824
1825 if (debug_threads && num_events > 1)
1826 fprintf (stderr,
1827 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1828 num_events, random_selector);
1829
1830 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1831 select_event_lwp_callback,
1832 &random_selector);
1833 }
1834
1835 if (event_lp != NULL)
1836 {
1837 /* Switch the event LWP. */
1838 *orig_lp = event_lp;
1839 }
1840}
1841
7984d532
PA
1842/* Decrement the suspend count of an LWP. */
1843
1844static int
1845unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1846{
1847 struct lwp_info *lwp = (struct lwp_info *) entry;
1848
1849 /* Ignore EXCEPT. */
1850 if (lwp == except)
1851 return 0;
1852
1853 lwp->suspended--;
1854
1855 gdb_assert (lwp->suspended >= 0);
1856 return 0;
1857}
1858
1859/* Decrement the suspend count of all LWPs, except EXCEPT, if non
1860 NULL. */
1861
1862static void
1863unsuspend_all_lwps (struct lwp_info *except)
1864{
1865 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1866}
1867
fa593d66
PA
1868static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1869static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1870 void *data);
1871static int lwp_running (struct inferior_list_entry *entry, void *data);
1872static ptid_t linux_wait_1 (ptid_t ptid,
1873 struct target_waitstatus *ourstatus,
1874 int target_options);
1875
1876/* Stabilize threads (move out of jump pads).
1877
1878 If a thread is midway collecting a fast tracepoint, we need to
1879 finish the collection and move it out of the jump pad before
1880 reporting the signal.
1881
1882 This avoids recursion while collecting (when a signal arrives
1883 midway, and the signal handler itself collects), which would trash
1884 the trace buffer. In case the user set a breakpoint in a signal
1885 handler, this avoids the backtrace showing the jump pad, etc..
1886 Most importantly, there are certain things we can't do safely if
1887 threads are stopped in a jump pad (or in its callee's). For
1888 example:
1889
1890 - starting a new trace run. A thread still collecting the
1891 previous run, could trash the trace buffer when resumed. The trace
1892 buffer control structures would have been reset but the thread had
1893 no way to tell. The thread could even midway memcpy'ing to the
1894 buffer, which would mean that when resumed, it would clobber the
1895 trace buffer that had been set for a new run.
1896
1897 - we can't rewrite/reuse the jump pads for new tracepoints
1898 safely. Say you do tstart while a thread is stopped midway while
1899 collecting. When the thread is later resumed, it finishes the
1900 collection, and returns to the jump pad, to execute the original
1901 instruction that was under the tracepoint jump at the time the
1902 older run had been started. If the jump pad had been rewritten
1903 since for something else in the new run, the thread would now
1904 execute the wrong / random instructions. */
1905
1906static void
1907linux_stabilize_threads (void)
1908{
1909 struct thread_info *save_inferior;
1910 struct lwp_info *lwp_stuck;
1911
1912 lwp_stuck
1913 = (struct lwp_info *) find_inferior (&all_lwps,
1914 stuck_in_jump_pad_callback, NULL);
1915 if (lwp_stuck != NULL)
1916 {
b4d51a55
PA
1917 if (debug_threads)
1918 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
1919 lwpid_of (lwp_stuck));
fa593d66
PA
1920 return;
1921 }
1922
1923 save_inferior = current_inferior;
1924
1925 stabilizing_threads = 1;
1926
1927 /* Kick 'em all. */
1928 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
1929
1930 /* Loop until all are stopped out of the jump pads. */
1931 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
1932 {
1933 struct target_waitstatus ourstatus;
1934 struct lwp_info *lwp;
1935 ptid_t ptid;
1936 int wstat;
1937
1938 /* Note that we go through the full wait even loop. While
1939 moving threads out of jump pad, we need to be able to step
1940 over internal breakpoints and such. */
1941 ptid = linux_wait_1 (minus_one_ptid, &ourstatus, 0);
1942
1943 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
1944 {
1945 lwp = get_thread_lwp (current_inferior);
1946
1947 /* Lock it. */
1948 lwp->suspended++;
1949
1950 if (ourstatus.value.sig != TARGET_SIGNAL_0
1951 || current_inferior->last_resume_kind == resume_stop)
1952 {
1953 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
1954 enqueue_one_deferred_signal (lwp, &wstat);
1955 }
1956 }
1957 }
1958
1959 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
1960
1961 stabilizing_threads = 0;
1962
1963 current_inferior = save_inferior;
1964
b4d51a55 1965 if (debug_threads)
fa593d66 1966 {
b4d51a55
PA
1967 lwp_stuck
1968 = (struct lwp_info *) find_inferior (&all_lwps,
1969 stuck_in_jump_pad_callback, NULL);
1970 if (lwp_stuck != NULL)
fa593d66
PA
1971 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
1972 lwpid_of (lwp_stuck));
1973 }
1974}
1975
0d62e5e8 1976/* Wait for process, returns status. */
da6d8c04 1977
95954743
PA
1978static ptid_t
1979linux_wait_1 (ptid_t ptid,
1980 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 1981{
e5f1222d 1982 int w;
fc7238bb 1983 struct lwp_info *event_child;
bd99dc85 1984 int options;
bd99dc85 1985 int pid;
6bf5e0ba
PA
1986 int step_over_finished;
1987 int bp_explains_trap;
1988 int maybe_internal_trap;
1989 int report_to_gdb;
219f2f23 1990 int trace_event;
bd99dc85
PA
1991
1992 /* Translate generic target options into linux options. */
1993 options = __WALL;
1994 if (target_options & TARGET_WNOHANG)
1995 options |= WNOHANG;
0d62e5e8
DJ
1996
1997retry:
fa593d66
PA
1998 bp_explains_trap = 0;
1999 trace_event = 0;
bd99dc85
PA
2000 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2001
0d62e5e8
DJ
2002 /* If we were only supposed to resume one thread, only wait for
2003 that thread - if it's still alive. If it died, however - which
2004 can happen if we're coming from the thread death case below -
2005 then we need to make sure we restart the other threads. We could
2006 pick a thread at random or restart all; restarting all is less
2007 arbitrary. */
95954743
PA
2008 if (!non_stop
2009 && !ptid_equal (cont_thread, null_ptid)
2010 && !ptid_equal (cont_thread, minus_one_ptid))
0d62e5e8 2011 {
fc7238bb
PA
2012 struct thread_info *thread;
2013
bd99dc85
PA
2014 thread = (struct thread_info *) find_inferior_id (&all_threads,
2015 cont_thread);
0d62e5e8
DJ
2016
2017 /* No stepping, no signal - unless one is pending already, of course. */
bd99dc85 2018 if (thread == NULL)
64386c31
DJ
2019 {
2020 struct thread_resume resume_info;
95954743 2021 resume_info.thread = minus_one_ptid;
bd99dc85
PA
2022 resume_info.kind = resume_continue;
2023 resume_info.sig = 0;
2bd7c093 2024 linux_resume (&resume_info, 1);
64386c31 2025 }
bd99dc85 2026 else
95954743 2027 ptid = cont_thread;
0d62e5e8 2028 }
da6d8c04 2029
6bf5e0ba
PA
2030 if (ptid_equal (step_over_bkpt, null_ptid))
2031 pid = linux_wait_for_event (ptid, &w, options);
2032 else
2033 {
2034 if (debug_threads)
2035 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2036 target_pid_to_str (step_over_bkpt));
2037 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2038 }
2039
bd99dc85 2040 if (pid == 0) /* only if TARGET_WNOHANG */
95954743 2041 return null_ptid;
bd99dc85 2042
6bf5e0ba 2043 event_child = get_thread_lwp (current_inferior);
da6d8c04 2044
0d62e5e8
DJ
2045 /* If we are waiting for a particular child, and it exited,
2046 linux_wait_for_event will return its exit status. Similarly if
2047 the last child exited. If this is not the last child, however,
2048 do not report it as exited until there is a 'thread exited' response
2049 available in the remote protocol. Instead, just wait for another event.
2050 This should be safe, because if the thread crashed we will already
2051 have reported the termination signal to GDB; that should stop any
2052 in-progress stepping operations, etc.
2053
2054 Report the exit status of the last thread to exit. This matches
2055 LinuxThreads' behavior. */
2056
95954743 2057 if (last_thread_of_process_p (current_inferior))
da6d8c04 2058 {
bd99dc85 2059 if (WIFEXITED (w) || WIFSIGNALED (w))
0d62e5e8 2060 {
bd99dc85
PA
2061 if (WIFEXITED (w))
2062 {
2063 ourstatus->kind = TARGET_WAITKIND_EXITED;
2064 ourstatus->value.integer = WEXITSTATUS (w);
2065
2066 if (debug_threads)
2067 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
2068 }
2069 else
2070 {
2071 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2072 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2073
2074 if (debug_threads)
2075 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
2076
2077 }
5b1c542e 2078
3e4c1235 2079 return ptid_of (event_child);
0d62e5e8 2080 }
da6d8c04 2081 }
0d62e5e8 2082 else
da6d8c04 2083 {
0d62e5e8
DJ
2084 if (!WIFSTOPPED (w))
2085 goto retry;
da6d8c04
DJ
2086 }
2087
6bf5e0ba
PA
2088 /* If this event was not handled before, and is not a SIGTRAP, we
2089 report it. SIGILL and SIGSEGV are also treated as traps in case
2090 a breakpoint is inserted at the current PC. If this target does
2091 not support internal breakpoints at all, we also report the
2092 SIGTRAP without further processing; it's of no concern to us. */
2093 maybe_internal_trap
2094 = (supports_breakpoints ()
2095 && (WSTOPSIG (w) == SIGTRAP
2096 || ((WSTOPSIG (w) == SIGILL
2097 || WSTOPSIG (w) == SIGSEGV)
2098 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2099
2100 if (maybe_internal_trap)
2101 {
2102 /* Handle anything that requires bookkeeping before deciding to
2103 report the event or continue waiting. */
2104
2105 /* First check if we can explain the SIGTRAP with an internal
2106 breakpoint, or if we should possibly report the event to GDB.
2107 Do this before anything that may remove or insert a
2108 breakpoint. */
2109 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2110
2111 /* We have a SIGTRAP, possibly a step-over dance has just
2112 finished. If so, tweak the state machine accordingly,
2113 reinsert breakpoints and delete any reinsert (software
2114 single-step) breakpoints. */
2115 step_over_finished = finish_step_over (event_child);
2116
2117 /* Now invoke the callbacks of any internal breakpoints there. */
2118 check_breakpoints (event_child->stop_pc);
2119
219f2f23
PA
2120 /* Handle tracepoint data collecting. This may overflow the
2121 trace buffer, and cause a tracing stop, removing
2122 breakpoints. */
2123 trace_event = handle_tracepoints (event_child);
2124
6bf5e0ba
PA
2125 if (bp_explains_trap)
2126 {
2127 /* If we stepped or ran into an internal breakpoint, we've
2128 already handled it. So next time we resume (from this
2129 PC), we should step over it. */
2130 if (debug_threads)
2131 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2132
8b07ae33
PA
2133 if (breakpoint_here (event_child->stop_pc))
2134 event_child->need_step_over = 1;
6bf5e0ba
PA
2135 }
2136 }
2137 else
2138 {
2139 /* We have some other signal, possibly a step-over dance was in
2140 progress, and it should be cancelled too. */
2141 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2142 }
2143
2144 /* We have all the data we need. Either report the event to GDB, or
2145 resume threads and keep waiting for more. */
2146
2147 /* If we're collecting a fast tracepoint, finish the collection and
2148 move out of the jump pad before delivering a signal. See
2149 linux_stabilize_threads. */
2150
2151 if (WIFSTOPPED (w)
2152 && WSTOPSIG (w) != SIGTRAP
2153 && supports_fast_tracepoints ()
2154 && in_process_agent_loaded ())
2155 {
2156 if (debug_threads)
2157 fprintf (stderr,
2158 "Got signal %d for LWP %ld. Check if we need "
2159 "to defer or adjust it.\n",
2160 WSTOPSIG (w), lwpid_of (event_child));
2161
2162 /* Allow debugging the jump pad itself. */
2163 if (current_inferior->last_resume_kind != resume_step
2164 && maybe_move_out_of_jump_pad (event_child, &w))
2165 {
2166 enqueue_one_deferred_signal (event_child, &w);
2167
2168 if (debug_threads)
2169 fprintf (stderr,
2170 "Signal %d for LWP %ld deferred (in jump pad)\n",
2171 WSTOPSIG (w), lwpid_of (event_child));
2172
2173 linux_resume_one_lwp (event_child, 0, 0, NULL);
2174 goto retry;
2175 }
2176 }
219f2f23 2177
fa593d66
PA
2178 if (event_child->collecting_fast_tracepoint)
2179 {
2180 if (debug_threads)
2181 fprintf (stderr, "\
2182LWP %ld was trying to move out of the jump pad (%d). \
2183Check if we're already there.\n",
2184 lwpid_of (event_child),
2185 event_child->collecting_fast_tracepoint);
2186
2187 trace_event = 1;
2188
2189 event_child->collecting_fast_tracepoint
2190 = linux_fast_tracepoint_collecting (event_child, NULL);
2191
2192 if (event_child->collecting_fast_tracepoint != 1)
2193 {
2194 /* No longer need this breakpoint. */
2195 if (event_child->exit_jump_pad_bkpt != NULL)
2196 {
2197 if (debug_threads)
2198 fprintf (stderr,
2199 "No longer need exit-jump-pad bkpt; removing it."
2200 "stopping all threads momentarily.\n");
2201
2202 /* Other running threads could hit this breakpoint.
2203 We don't handle moribund locations like GDB does,
2204 instead we always pause all threads when removing
2205 breakpoints, so that any step-over or
2206 decr_pc_after_break adjustment is always taken
2207 care of while the breakpoint is still
2208 inserted. */
2209 stop_all_lwps (1, event_child);
2210 cancel_breakpoints ();
2211
2212 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2213 event_child->exit_jump_pad_bkpt = NULL;
2214
2215 unstop_all_lwps (1, event_child);
2216
2217 gdb_assert (event_child->suspended >= 0);
2218 }
2219 }
2220
2221 if (event_child->collecting_fast_tracepoint == 0)
2222 {
2223 if (debug_threads)
2224 fprintf (stderr,
2225 "fast tracepoint finished "
2226 "collecting successfully.\n");
2227
2228 /* We may have a deferred signal to report. */
2229 if (dequeue_one_deferred_signal (event_child, &w))
2230 {
2231 if (debug_threads)
2232 fprintf (stderr, "dequeued one signal.\n");
2233 }
3c11dd79 2234 else
fa593d66 2235 {
3c11dd79
PA
2236 if (debug_threads)
2237 fprintf (stderr, "no deferred signals.\n");
fa593d66
PA
2238
2239 if (stabilizing_threads)
2240 {
2241 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2242 ourstatus->value.sig = TARGET_SIGNAL_0;
2243 return ptid_of (event_child);
2244 }
2245 }
2246 }
6bf5e0ba
PA
2247 }
2248
e471f25b
PA
2249 /* Check whether GDB would be interested in this event. */
2250
2251 /* If GDB is not interested in this signal, don't stop other
2252 threads, and don't report it to GDB. Just resume the inferior
2253 right away. We do this for threading-related signals as well as
2254 any that GDB specifically requested we ignore. But never ignore
2255 SIGSTOP if we sent it ourselves, and do not ignore signals when
2256 stepping - they may require special handling to skip the signal
2257 handler. */
2258 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2259 thread library? */
2260 if (WIFSTOPPED (w)
2261 && current_inferior->last_resume_kind != resume_step
2262 && (
1a981360 2263#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
e471f25b
PA
2264 (current_process ()->private->thread_db != NULL
2265 && (WSTOPSIG (w) == __SIGRTMIN
2266 || WSTOPSIG (w) == __SIGRTMIN + 1))
2267 ||
2268#endif
2269 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2270 && !(WSTOPSIG (w) == SIGSTOP
2271 && current_inferior->last_resume_kind == resume_stop))))
2272 {
2273 siginfo_t info, *info_p;
2274
2275 if (debug_threads)
2276 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2277 WSTOPSIG (w), lwpid_of (event_child));
2278
2279 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2280 info_p = &info;
2281 else
2282 info_p = NULL;
2283 linux_resume_one_lwp (event_child, event_child->stepping,
2284 WSTOPSIG (w), info_p);
2285 goto retry;
2286 }
2287
2288 /* If GDB wanted this thread to single step, we always want to
2289 report the SIGTRAP, and let GDB handle it. Watchpoints should
2290 always be reported. So should signals we can't explain. A
2291 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2292 not support Z0 breakpoints. If we do, we're be able to handle
2293 GDB breakpoints on top of internal breakpoints, by handling the
2294 internal breakpoint and still reporting the event to GDB. If we
2295 don't, we're out of luck, GDB won't see the breakpoint hit. */
6bf5e0ba 2296 report_to_gdb = (!maybe_internal_trap
8336d594 2297 || current_inferior->last_resume_kind == resume_step
6bf5e0ba 2298 || event_child->stopped_by_watchpoint
219f2f23 2299 || (!step_over_finished && !bp_explains_trap && !trace_event)
8b07ae33 2300 || gdb_breakpoint_here (event_child->stop_pc));
6bf5e0ba
PA
2301
2302 /* We found no reason GDB would want us to stop. We either hit one
2303 of our own breakpoints, or finished an internal step GDB
2304 shouldn't know about. */
2305 if (!report_to_gdb)
2306 {
2307 if (debug_threads)
2308 {
2309 if (bp_explains_trap)
2310 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2311 if (step_over_finished)
2312 fprintf (stderr, "Step-over finished.\n");
219f2f23
PA
2313 if (trace_event)
2314 fprintf (stderr, "Tracepoint event.\n");
6bf5e0ba
PA
2315 }
2316
2317 /* We're not reporting this breakpoint to GDB, so apply the
2318 decr_pc_after_break adjustment to the inferior's regcache
2319 ourselves. */
2320
2321 if (the_low_target.set_pc != NULL)
2322 {
2323 struct regcache *regcache
2324 = get_thread_regcache (get_lwp_thread (event_child), 1);
2325 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2326 }
2327
7984d532
PA
2328 /* We may have finished stepping over a breakpoint. If so,
2329 we've stopped and suspended all LWPs momentarily except the
2330 stepping one. This is where we resume them all again. We're
2331 going to keep waiting, so use proceed, which handles stepping
2332 over the next breakpoint. */
6bf5e0ba
PA
2333 if (debug_threads)
2334 fprintf (stderr, "proceeding all threads.\n");
7984d532
PA
2335
2336 if (step_over_finished)
2337 unsuspend_all_lwps (event_child);
2338
6bf5e0ba
PA
2339 proceed_all_lwps ();
2340 goto retry;
2341 }
2342
2343 if (debug_threads)
2344 {
8336d594 2345 if (current_inferior->last_resume_kind == resume_step)
6bf5e0ba
PA
2346 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2347 if (event_child->stopped_by_watchpoint)
2348 fprintf (stderr, "Stopped by watchpoint.\n");
8b07ae33
PA
2349 if (gdb_breakpoint_here (event_child->stop_pc))
2350 fprintf (stderr, "Stopped by GDB breakpoint.\n");
6bf5e0ba
PA
2351 if (debug_threads)
2352 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2353 }
2354
2355 /* Alright, we're going to report a stop. */
2356
fa593d66 2357 if (!non_stop && !stabilizing_threads)
6bf5e0ba
PA
2358 {
2359 /* In all-stop, stop all threads. */
7984d532 2360 stop_all_lwps (0, NULL);
6bf5e0ba
PA
2361
2362 /* If we're not waiting for a specific LWP, choose an event LWP
2363 from among those that have had events. Giving equal priority
2364 to all LWPs that have had events helps prevent
2365 starvation. */
2366 if (ptid_equal (ptid, minus_one_ptid))
2367 {
2368 event_child->status_pending_p = 1;
2369 event_child->status_pending = w;
2370
2371 select_event_lwp (&event_child);
2372
2373 event_child->status_pending_p = 0;
2374 w = event_child->status_pending;
2375 }
2376
2377 /* Now that we've selected our final event LWP, cancel any
2378 breakpoints in other LWPs that have hit a GDB breakpoint.
2379 See the comment in cancel_breakpoints_callback to find out
2380 why. */
2381 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
fa593d66
PA
2382
2383 /* Stabilize threads (move out of jump pads). */
2384 stabilize_threads ();
6bf5e0ba
PA
2385 }
2386 else
2387 {
2388 /* If we just finished a step-over, then all threads had been
2389 momentarily paused. In all-stop, that's fine, we want
2390 threads stopped by now anyway. In non-stop, we need to
2391 re-resume threads that GDB wanted to be running. */
2392 if (step_over_finished)
7984d532 2393 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
2394 }
2395
5b1c542e 2396 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 2397
8336d594
PA
2398 if (current_inferior->last_resume_kind == resume_stop
2399 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
2400 {
2401 /* A thread that has been requested to stop by GDB with vCont;t,
2402 and it stopped cleanly, so report as SIG0. The use of
2403 SIGSTOP is an implementation detail. */
2404 ourstatus->value.sig = TARGET_SIGNAL_0;
2405 }
8336d594
PA
2406 else if (current_inferior->last_resume_kind == resume_stop
2407 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
2408 {
2409 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 2410 but, it stopped for other reasons. */
bd99dc85
PA
2411 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2412 }
2413 else
2414 {
2415 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2416 }
2417
d50171e4
PA
2418 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2419
bd99dc85 2420 if (debug_threads)
95954743 2421 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
6bf5e0ba 2422 target_pid_to_str (ptid_of (event_child)),
bd99dc85
PA
2423 ourstatus->kind,
2424 ourstatus->value.sig);
2425
6bf5e0ba 2426 return ptid_of (event_child);
bd99dc85
PA
2427}
2428
2429/* Get rid of any pending event in the pipe. */
2430static void
2431async_file_flush (void)
2432{
2433 int ret;
2434 char buf;
2435
2436 do
2437 ret = read (linux_event_pipe[0], &buf, 1);
2438 while (ret >= 0 || (ret == -1 && errno == EINTR));
2439}
2440
2441/* Put something in the pipe, so the event loop wakes up. */
2442static void
2443async_file_mark (void)
2444{
2445 int ret;
2446
2447 async_file_flush ();
2448
2449 do
2450 ret = write (linux_event_pipe[1], "+", 1);
2451 while (ret == 0 || (ret == -1 && errno == EINTR));
2452
2453 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2454 be awakened anyway. */
2455}
2456
95954743
PA
2457static ptid_t
2458linux_wait (ptid_t ptid,
2459 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 2460{
95954743 2461 ptid_t event_ptid;
bd99dc85
PA
2462
2463 if (debug_threads)
95954743 2464 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
bd99dc85
PA
2465
2466 /* Flush the async file first. */
2467 if (target_is_async_p ())
2468 async_file_flush ();
2469
95954743 2470 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
bd99dc85
PA
2471
2472 /* If at least one stop was reported, there may be more. A single
2473 SIGCHLD can signal more than one child stop. */
2474 if (target_is_async_p ()
2475 && (target_options & TARGET_WNOHANG) != 0
95954743 2476 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
2477 async_file_mark ();
2478
2479 return event_ptid;
da6d8c04
DJ
2480}
2481
c5f62d5f 2482/* Send a signal to an LWP. */
fd500816
DJ
2483
2484static int
a1928bad 2485kill_lwp (unsigned long lwpid, int signo)
fd500816 2486{
c5f62d5f
DE
2487 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2488 fails, then we are not using nptl threads and we should be using kill. */
fd500816 2489
c5f62d5f
DE
2490#ifdef __NR_tkill
2491 {
2492 static int tkill_failed;
fd500816 2493
c5f62d5f
DE
2494 if (!tkill_failed)
2495 {
2496 int ret;
2497
2498 errno = 0;
2499 ret = syscall (__NR_tkill, lwpid, signo);
2500 if (errno != ENOSYS)
2501 return ret;
2502 tkill_failed = 1;
2503 }
2504 }
fd500816
DJ
2505#endif
2506
2507 return kill (lwpid, signo);
2508}
2509
964e4306
PA
2510void
2511linux_stop_lwp (struct lwp_info *lwp)
2512{
2513 send_sigstop (lwp);
2514}
2515
0d62e5e8 2516static void
02fc4de7 2517send_sigstop (struct lwp_info *lwp)
0d62e5e8 2518{
bd99dc85 2519 int pid;
0d62e5e8 2520
bd99dc85
PA
2521 pid = lwpid_of (lwp);
2522
0d62e5e8
DJ
2523 /* If we already have a pending stop signal for this process, don't
2524 send another. */
54a0b537 2525 if (lwp->stop_expected)
0d62e5e8 2526 {
ae13219e 2527 if (debug_threads)
bd99dc85 2528 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
ae13219e 2529
0d62e5e8
DJ
2530 return;
2531 }
2532
2533 if (debug_threads)
bd99dc85 2534 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
0d62e5e8 2535
d50171e4 2536 lwp->stop_expected = 1;
bd99dc85 2537 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
2538}
2539
7984d532
PA
2540static int
2541send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7
PA
2542{
2543 struct lwp_info *lwp = (struct lwp_info *) entry;
2544
7984d532
PA
2545 /* Ignore EXCEPT. */
2546 if (lwp == except)
2547 return 0;
2548
02fc4de7 2549 if (lwp->stopped)
7984d532 2550 return 0;
02fc4de7
PA
2551
2552 send_sigstop (lwp);
7984d532
PA
2553 return 0;
2554}
2555
2556/* Increment the suspend count of an LWP, and stop it, if not stopped
2557 yet. */
2558static int
2559suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2560 void *except)
2561{
2562 struct lwp_info *lwp = (struct lwp_info *) entry;
2563
2564 /* Ignore EXCEPT. */
2565 if (lwp == except)
2566 return 0;
2567
2568 lwp->suspended++;
2569
2570 return send_sigstop_callback (entry, except);
02fc4de7
PA
2571}
2572
95954743
PA
2573static void
2574mark_lwp_dead (struct lwp_info *lwp, int wstat)
2575{
2576 /* It's dead, really. */
2577 lwp->dead = 1;
2578
2579 /* Store the exit status for later. */
2580 lwp->status_pending_p = 1;
2581 lwp->status_pending = wstat;
2582
95954743
PA
2583 /* Prevent trying to stop it. */
2584 lwp->stopped = 1;
2585
2586 /* No further stops are expected from a dead lwp. */
2587 lwp->stop_expected = 0;
2588}
2589
0d62e5e8
DJ
2590static void
2591wait_for_sigstop (struct inferior_list_entry *entry)
2592{
54a0b537 2593 struct lwp_info *lwp = (struct lwp_info *) entry;
bd99dc85 2594 struct thread_info *saved_inferior;
a1928bad 2595 int wstat;
95954743
PA
2596 ptid_t saved_tid;
2597 ptid_t ptid;
d50171e4 2598 int pid;
0d62e5e8 2599
54a0b537 2600 if (lwp->stopped)
d50171e4
PA
2601 {
2602 if (debug_threads)
2603 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2604 lwpid_of (lwp));
2605 return;
2606 }
0d62e5e8
DJ
2607
2608 saved_inferior = current_inferior;
bd99dc85
PA
2609 if (saved_inferior != NULL)
2610 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2611 else
95954743 2612 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 2613
95954743 2614 ptid = lwp->head.id;
bd99dc85 2615
d50171e4
PA
2616 if (debug_threads)
2617 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2618
2619 pid = linux_wait_for_event (ptid, &wstat, __WALL);
0d62e5e8
DJ
2620
2621 /* If we stopped with a non-SIGSTOP signal, save it for later
2622 and record the pending SIGSTOP. If the process exited, just
2623 return. */
d50171e4 2624 if (WIFSTOPPED (wstat))
0d62e5e8
DJ
2625 {
2626 if (debug_threads)
d50171e4
PA
2627 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2628 lwpid_of (lwp), WSTOPSIG (wstat));
c35fafde 2629
d50171e4 2630 if (WSTOPSIG (wstat) != SIGSTOP)
c35fafde
PA
2631 {
2632 if (debug_threads)
d50171e4
PA
2633 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2634 lwpid_of (lwp), wstat);
2635
c35fafde
PA
2636 lwp->status_pending_p = 1;
2637 lwp->status_pending = wstat;
2638 }
0d62e5e8 2639 }
d50171e4 2640 else
95954743
PA
2641 {
2642 if (debug_threads)
d50171e4 2643 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
95954743 2644
d50171e4
PA
2645 lwp = find_lwp_pid (pid_to_ptid (pid));
2646 if (lwp)
2647 {
2648 /* Leave this status pending for the next time we're able to
2649 report it. In the mean time, we'll report this lwp as
2650 dead to GDB, so GDB doesn't try to read registers and
2651 memory from it. This can only happen if this was the
2652 last thread of the process; otherwise, PID is removed
2653 from the thread tables before linux_wait_for_event
2654 returns. */
2655 mark_lwp_dead (lwp, wstat);
2656 }
95954743 2657 }
0d62e5e8 2658
bd99dc85 2659 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
0d62e5e8
DJ
2660 current_inferior = saved_inferior;
2661 else
2662 {
2663 if (debug_threads)
2664 fprintf (stderr, "Previously current thread died.\n");
2665
bd99dc85
PA
2666 if (non_stop)
2667 {
2668 /* We can't change the current inferior behind GDB's back,
2669 otherwise, a subsequent command may apply to the wrong
2670 process. */
2671 current_inferior = NULL;
2672 }
2673 else
2674 {
2675 /* Set a valid thread as current. */
2676 set_desired_inferior (0);
2677 }
0d62e5e8
DJ
2678 }
2679}
2680
fa593d66
PA
2681/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2682 move it out, because we need to report the stop event to GDB. For
2683 example, if the user puts a breakpoint in the jump pad, it's
2684 because she wants to debug it. */
2685
2686static int
2687stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2688{
2689 struct lwp_info *lwp = (struct lwp_info *) entry;
2690 struct thread_info *thread = get_lwp_thread (lwp);
2691
2692 gdb_assert (lwp->suspended == 0);
2693 gdb_assert (lwp->stopped);
2694
2695 /* Allow debugging the jump pad, gdb_collect, etc.. */
2696 return (supports_fast_tracepoints ()
2697 && in_process_agent_loaded ()
2698 && (gdb_breakpoint_here (lwp->stop_pc)
2699 || lwp->stopped_by_watchpoint
2700 || thread->last_resume_kind == resume_step)
2701 && linux_fast_tracepoint_collecting (lwp, NULL));
2702}
2703
2704static void
2705move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2706{
2707 struct lwp_info *lwp = (struct lwp_info *) entry;
2708 struct thread_info *thread = get_lwp_thread (lwp);
2709 int *wstat;
2710
2711 gdb_assert (lwp->suspended == 0);
2712 gdb_assert (lwp->stopped);
2713
2714 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2715
2716 /* Allow debugging the jump pad, gdb_collect, etc. */
2717 if (!gdb_breakpoint_here (lwp->stop_pc)
2718 && !lwp->stopped_by_watchpoint
2719 && thread->last_resume_kind != resume_step
2720 && maybe_move_out_of_jump_pad (lwp, wstat))
2721 {
2722 if (debug_threads)
2723 fprintf (stderr,
2724 "LWP %ld needs stabilizing (in jump pad)\n",
2725 lwpid_of (lwp));
2726
2727 if (wstat)
2728 {
2729 lwp->status_pending_p = 0;
2730 enqueue_one_deferred_signal (lwp, wstat);
2731
2732 if (debug_threads)
2733 fprintf (stderr,
2734 "Signal %d for LWP %ld deferred "
2735 "(in jump pad)\n",
2736 WSTOPSIG (*wstat), lwpid_of (lwp));
2737 }
2738
2739 linux_resume_one_lwp (lwp, 0, 0, NULL);
2740 }
2741 else
2742 lwp->suspended++;
2743}
2744
2745static int
2746lwp_running (struct inferior_list_entry *entry, void *data)
2747{
2748 struct lwp_info *lwp = (struct lwp_info *) entry;
2749
2750 if (lwp->dead)
2751 return 0;
2752 if (lwp->stopped)
2753 return 0;
2754 return 1;
2755}
2756
7984d532
PA
2757/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2758 If SUSPEND, then also increase the suspend count of every LWP,
2759 except EXCEPT. */
2760
0d62e5e8 2761static void
7984d532 2762stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8
DJ
2763{
2764 stopping_threads = 1;
7984d532
PA
2765
2766 if (suspend)
2767 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2768 else
2769 find_inferior (&all_lwps, send_sigstop_callback, except);
54a0b537 2770 for_each_inferior (&all_lwps, wait_for_sigstop);
0d62e5e8
DJ
2771 stopping_threads = 0;
2772}
2773
da6d8c04
DJ
2774/* Resume execution of the inferior process.
2775 If STEP is nonzero, single-step it.
2776 If SIGNAL is nonzero, give it that signal. */
2777
ce3a066d 2778static void
2acc282a 2779linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 2780 int step, int signal, siginfo_t *info)
da6d8c04 2781{
0d62e5e8 2782 struct thread_info *saved_inferior;
fa593d66 2783 int fast_tp_collecting;
0d62e5e8 2784
54a0b537 2785 if (lwp->stopped == 0)
0d62e5e8
DJ
2786 return;
2787
fa593d66
PA
2788 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2789
2790 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2791
219f2f23
PA
2792 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2793 user used the "jump" command, or "set $pc = foo"). */
2794 if (lwp->stop_pc != get_pc (lwp))
2795 {
2796 /* Collecting 'while-stepping' actions doesn't make sense
2797 anymore. */
2798 release_while_stepping_state_list (get_lwp_thread (lwp));
2799 }
2800
0d62e5e8
DJ
2801 /* If we have pending signals or status, and a new signal, enqueue the
2802 signal. Also enqueue the signal if we are waiting to reinsert a
2803 breakpoint; it will be picked up again below. */
2804 if (signal != 0
fa593d66
PA
2805 && (lwp->status_pending_p
2806 || lwp->pending_signals != NULL
2807 || lwp->bp_reinsert != 0
2808 || fast_tp_collecting))
0d62e5e8
DJ
2809 {
2810 struct pending_signals *p_sig;
bca929d3 2811 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 2812 p_sig->prev = lwp->pending_signals;
0d62e5e8 2813 p_sig->signal = signal;
32ca6d61
DJ
2814 if (info == NULL)
2815 memset (&p_sig->info, 0, sizeof (siginfo_t));
2816 else
2817 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 2818 lwp->pending_signals = p_sig;
0d62e5e8
DJ
2819 }
2820
d50171e4
PA
2821 if (lwp->status_pending_p)
2822 {
2823 if (debug_threads)
2824 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2825 " has pending status\n",
2826 lwpid_of (lwp), step ? "step" : "continue", signal,
2827 lwp->stop_expected ? "expected" : "not expected");
2828 return;
2829 }
0d62e5e8
DJ
2830
2831 saved_inferior = current_inferior;
54a0b537 2832 current_inferior = get_lwp_thread (lwp);
0d62e5e8
DJ
2833
2834 if (debug_threads)
1b3f6016 2835 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
bd99dc85 2836 lwpid_of (lwp), step ? "step" : "continue", signal,
54a0b537 2837 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
2838
2839 /* This bit needs some thinking about. If we get a signal that
2840 we must report while a single-step reinsert is still pending,
2841 we often end up resuming the thread. It might be better to
2842 (ew) allow a stack of pending events; then we could be sure that
2843 the reinsert happened right away and not lose any signals.
2844
2845 Making this stack would also shrink the window in which breakpoints are
54a0b537 2846 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
2847 complete correctness, so it won't solve that problem. It may be
2848 worthwhile just to solve this one, however. */
54a0b537 2849 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
2850 {
2851 if (debug_threads)
d50171e4
PA
2852 fprintf (stderr, " pending reinsert at 0x%s\n",
2853 paddress (lwp->bp_reinsert));
2854
2855 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2856 {
fa593d66
PA
2857 if (fast_tp_collecting == 0)
2858 {
2859 if (step == 0)
2860 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2861 if (lwp->suspended)
2862 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2863 lwp->suspended);
2864 }
d50171e4
PA
2865
2866 step = 1;
2867 }
0d62e5e8
DJ
2868
2869 /* Postpone any pending signal. It was enqueued above. */
2870 signal = 0;
2871 }
2872
fa593d66
PA
2873 if (fast_tp_collecting == 1)
2874 {
2875 if (debug_threads)
2876 fprintf (stderr, "\
2877lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2878 lwpid_of (lwp));
2879
2880 /* Postpone any pending signal. It was enqueued above. */
2881 signal = 0;
2882 }
2883 else if (fast_tp_collecting == 2)
2884 {
2885 if (debug_threads)
2886 fprintf (stderr, "\
2887lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2888 lwpid_of (lwp));
2889
2890 if (can_hardware_single_step ())
2891 step = 1;
2892 else
2893 fatal ("moving out of jump pad single-stepping"
2894 " not implemented on this target");
2895
2896 /* Postpone any pending signal. It was enqueued above. */
2897 signal = 0;
2898 }
2899
219f2f23
PA
2900 /* If we have while-stepping actions in this thread set it stepping.
2901 If we have a signal to deliver, it may or may not be set to
2902 SIG_IGN, we don't know. Assume so, and allow collecting
2903 while-stepping into a signal handler. A possible smart thing to
2904 do would be to set an internal breakpoint at the signal return
2905 address, continue, and carry on catching this while-stepping
2906 action only when that breakpoint is hit. A future
2907 enhancement. */
2908 if (get_lwp_thread (lwp)->while_stepping != NULL
2909 && can_hardware_single_step ())
2910 {
2911 if (debug_threads)
2912 fprintf (stderr,
2913 "lwp %ld has a while-stepping action -> forcing step.\n",
2914 lwpid_of (lwp));
2915 step = 1;
2916 }
2917
aa691b87 2918 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 2919 {
442ea881
PA
2920 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2921 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
47c0c975 2922 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
2923 }
2924
fa593d66
PA
2925 /* If we have pending signals, consume one unless we are trying to
2926 reinsert a breakpoint or we're trying to finish a fast tracepoint
2927 collect. */
2928 if (lwp->pending_signals != NULL
2929 && lwp->bp_reinsert == 0
2930 && fast_tp_collecting == 0)
0d62e5e8
DJ
2931 {
2932 struct pending_signals **p_sig;
2933
54a0b537 2934 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
2935 while ((*p_sig)->prev != NULL)
2936 p_sig = &(*p_sig)->prev;
2937
2938 signal = (*p_sig)->signal;
32ca6d61 2939 if ((*p_sig)->info.si_signo != 0)
bd99dc85 2940 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
32ca6d61 2941
0d62e5e8
DJ
2942 free (*p_sig);
2943 *p_sig = NULL;
2944 }
2945
aa5ca48f
DE
2946 if (the_low_target.prepare_to_resume != NULL)
2947 the_low_target.prepare_to_resume (lwp);
2948
0d62e5e8 2949 regcache_invalidate_one ((struct inferior_list_entry *)
54a0b537 2950 get_lwp_thread (lwp));
da6d8c04 2951 errno = 0;
54a0b537 2952 lwp->stopped = 0;
c3adc08c 2953 lwp->stopped_by_watchpoint = 0;
54a0b537 2954 lwp->stepping = step;
14ce3065
DE
2955 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2956 /* Coerce to a uintptr_t first to avoid potential gcc warning
2957 of coercing an 8 byte integer to a 4 byte pointer. */
2958 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
0d62e5e8
DJ
2959
2960 current_inferior = saved_inferior;
da6d8c04 2961 if (errno)
3221518c
UW
2962 {
2963 /* ESRCH from ptrace either means that the thread was already
2964 running (an error) or that it is gone (a race condition). If
2965 it's gone, we will get a notification the next time we wait,
2966 so we can ignore the error. We could differentiate these
2967 two, but it's tricky without waiting; the thread still exists
2968 as a zombie, so sending it signal 0 would succeed. So just
2969 ignore ESRCH. */
2970 if (errno == ESRCH)
2971 return;
2972
2973 perror_with_name ("ptrace");
2974 }
da6d8c04
DJ
2975}
2976
2bd7c093
PA
2977struct thread_resume_array
2978{
2979 struct thread_resume *resume;
2980 size_t n;
2981};
64386c31
DJ
2982
2983/* This function is called once per thread. We look up the thread
5544ad89
DJ
2984 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2985 resume request.
2986
2987 This algorithm is O(threads * resume elements), but resume elements
2988 is small (and will remain small at least until GDB supports thread
2989 suspension). */
2bd7c093
PA
2990static int
2991linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 2992{
54a0b537 2993 struct lwp_info *lwp;
64386c31 2994 struct thread_info *thread;
5544ad89 2995 int ndx;
2bd7c093 2996 struct thread_resume_array *r;
64386c31
DJ
2997
2998 thread = (struct thread_info *) entry;
54a0b537 2999 lwp = get_thread_lwp (thread);
2bd7c093 3000 r = arg;
64386c31 3001
2bd7c093 3002 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3003 {
3004 ptid_t ptid = r->resume[ndx].thread;
3005 if (ptid_equal (ptid, minus_one_ptid)
3006 || ptid_equal (ptid, entry->id)
3007 || (ptid_is_pid (ptid)
3008 && (ptid_get_pid (ptid) == pid_of (lwp)))
3009 || (ptid_get_lwp (ptid) == -1
3010 && (ptid_get_pid (ptid) == pid_of (lwp))))
3011 {
d50171e4 3012 if (r->resume[ndx].kind == resume_stop
8336d594 3013 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3014 {
3015 if (debug_threads)
3016 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3017 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3018 ? "stopped"
3019 : "stopping",
3020 lwpid_of (lwp));
3021
3022 continue;
3023 }
3024
95954743 3025 lwp->resume = &r->resume[ndx];
8336d594 3026 thread->last_resume_kind = lwp->resume->kind;
fa593d66
PA
3027
3028 /* If we had a deferred signal to report, dequeue one now.
3029 This can happen if LWP gets more than one signal while
3030 trying to get out of a jump pad. */
3031 if (lwp->stopped
3032 && !lwp->status_pending_p
3033 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3034 {
3035 lwp->status_pending_p = 1;
3036
3037 if (debug_threads)
3038 fprintf (stderr,
3039 "Dequeueing deferred signal %d for LWP %ld, "
3040 "leaving status pending.\n",
3041 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3042 }
3043
95954743
PA
3044 return 0;
3045 }
3046 }
2bd7c093
PA
3047
3048 /* No resume action for this thread. */
3049 lwp->resume = NULL;
64386c31 3050
2bd7c093 3051 return 0;
5544ad89
DJ
3052}
3053
5544ad89 3054
bd99dc85
PA
3055/* Set *FLAG_P if this lwp has an interesting status pending. */
3056static int
3057resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3058{
bd99dc85 3059 struct lwp_info *lwp = (struct lwp_info *) entry;
5544ad89 3060
bd99dc85
PA
3061 /* LWPs which will not be resumed are not interesting, because
3062 we might not wait for them next time through linux_wait. */
2bd7c093 3063 if (lwp->resume == NULL)
bd99dc85 3064 return 0;
64386c31 3065
bd99dc85 3066 if (lwp->status_pending_p)
d50171e4
PA
3067 * (int *) flag_p = 1;
3068
3069 return 0;
3070}
3071
3072/* Return 1 if this lwp that GDB wants running is stopped at an
3073 internal breakpoint that we need to step over. It assumes that any
3074 required STOP_PC adjustment has already been propagated to the
3075 inferior's regcache. */
3076
3077static int
3078need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3079{
3080 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3081 struct thread_info *thread;
d50171e4
PA
3082 struct thread_info *saved_inferior;
3083 CORE_ADDR pc;
3084
3085 /* LWPs which will not be resumed are not interesting, because we
3086 might not wait for them next time through linux_wait. */
3087
3088 if (!lwp->stopped)
3089 {
3090 if (debug_threads)
3091 fprintf (stderr,
3092 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3093 lwpid_of (lwp));
3094 return 0;
3095 }
3096
8336d594
PA
3097 thread = get_lwp_thread (lwp);
3098
3099 if (thread->last_resume_kind == resume_stop)
d50171e4
PA
3100 {
3101 if (debug_threads)
3102 fprintf (stderr,
3103 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3104 lwpid_of (lwp));
3105 return 0;
3106 }
3107
7984d532
PA
3108 gdb_assert (lwp->suspended >= 0);
3109
3110 if (lwp->suspended)
3111 {
3112 if (debug_threads)
3113 fprintf (stderr,
3114 "Need step over [LWP %ld]? Ignoring, suspended\n",
3115 lwpid_of (lwp));
3116 return 0;
3117 }
3118
d50171e4
PA
3119 if (!lwp->need_step_over)
3120 {
3121 if (debug_threads)
3122 fprintf (stderr,
3123 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3124 }
5544ad89 3125
bd99dc85 3126 if (lwp->status_pending_p)
d50171e4
PA
3127 {
3128 if (debug_threads)
3129 fprintf (stderr,
3130 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3131 lwpid_of (lwp));
3132 return 0;
3133 }
3134
3135 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3136 or we have. */
3137 pc = get_pc (lwp);
3138
3139 /* If the PC has changed since we stopped, then don't do anything,
3140 and let the breakpoint/tracepoint be hit. This happens if, for
3141 instance, GDB handled the decr_pc_after_break subtraction itself,
3142 GDB is OOL stepping this thread, or the user has issued a "jump"
3143 command, or poked thread's registers herself. */
3144 if (pc != lwp->stop_pc)
3145 {
3146 if (debug_threads)
3147 fprintf (stderr,
3148 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3149 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3150 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3151
3152 lwp->need_step_over = 0;
3153 return 0;
3154 }
3155
3156 saved_inferior = current_inferior;
8336d594 3157 current_inferior = thread;
d50171e4 3158
8b07ae33 3159 /* We can only step over breakpoints we know about. */
fa593d66 3160 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
d50171e4 3161 {
8b07ae33
PA
3162 /* Don't step over a breakpoint that GDB expects to hit
3163 though. */
3164 if (gdb_breakpoint_here (pc))
3165 {
3166 if (debug_threads)
3167 fprintf (stderr,
3168 "Need step over [LWP %ld]? yes, but found"
3169 " GDB breakpoint at 0x%s; skipping step over\n",
3170 lwpid_of (lwp), paddress (pc));
d50171e4 3171
8b07ae33
PA
3172 current_inferior = saved_inferior;
3173 return 0;
3174 }
3175 else
3176 {
3177 if (debug_threads)
3178 fprintf (stderr,
3179 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
3180 lwpid_of (lwp), paddress (pc));
d50171e4 3181
8b07ae33
PA
3182 /* We've found an lwp that needs stepping over --- return 1 so
3183 that find_inferior stops looking. */
3184 current_inferior = saved_inferior;
3185
3186 /* If the step over is cancelled, this is set again. */
3187 lwp->need_step_over = 0;
3188 return 1;
3189 }
d50171e4
PA
3190 }
3191
3192 current_inferior = saved_inferior;
3193
3194 if (debug_threads)
3195 fprintf (stderr,
3196 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3197 lwpid_of (lwp), paddress (pc));
c6ecbae5 3198
bd99dc85 3199 return 0;
5544ad89
DJ
3200}
3201
d50171e4
PA
3202/* Start a step-over operation on LWP. When LWP stopped at a
3203 breakpoint, to make progress, we need to remove the breakpoint out
3204 of the way. If we let other threads run while we do that, they may
3205 pass by the breakpoint location and miss hitting it. To avoid
3206 that, a step-over momentarily stops all threads while LWP is
3207 single-stepped while the breakpoint is temporarily uninserted from
3208 the inferior. When the single-step finishes, we reinsert the
3209 breakpoint, and let all threads that are supposed to be running,
3210 run again.
3211
3212 On targets that don't support hardware single-step, we don't
3213 currently support full software single-stepping. Instead, we only
3214 support stepping over the thread event breakpoint, by asking the
3215 low target where to place a reinsert breakpoint. Since this
3216 routine assumes the breakpoint being stepped over is a thread event
3217 breakpoint, it usually assumes the return address of the current
3218 function is a good enough place to set the reinsert breakpoint. */
3219
3220static int
3221start_step_over (struct lwp_info *lwp)
3222{
3223 struct thread_info *saved_inferior;
3224 CORE_ADDR pc;
3225 int step;
3226
3227 if (debug_threads)
3228 fprintf (stderr,
3229 "Starting step-over on LWP %ld. Stopping all threads\n",
3230 lwpid_of (lwp));
3231
7984d532
PA
3232 stop_all_lwps (1, lwp);
3233 gdb_assert (lwp->suspended == 0);
d50171e4
PA
3234
3235 if (debug_threads)
3236 fprintf (stderr, "Done stopping all threads for step-over.\n");
3237
3238 /* Note, we should always reach here with an already adjusted PC,
3239 either by GDB (if we're resuming due to GDB's request), or by our
3240 caller, if we just finished handling an internal breakpoint GDB
3241 shouldn't care about. */
3242 pc = get_pc (lwp);
3243
3244 saved_inferior = current_inferior;
3245 current_inferior = get_lwp_thread (lwp);
3246
3247 lwp->bp_reinsert = pc;
3248 uninsert_breakpoints_at (pc);
fa593d66 3249 uninsert_fast_tracepoint_jumps_at (pc);
d50171e4
PA
3250
3251 if (can_hardware_single_step ())
3252 {
3253 step = 1;
3254 }
3255 else
3256 {
3257 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3258 set_reinsert_breakpoint (raddr);
3259 step = 0;
3260 }
3261
3262 current_inferior = saved_inferior;
3263
3264 linux_resume_one_lwp (lwp, step, 0, NULL);
3265
3266 /* Require next event from this LWP. */
3267 step_over_bkpt = lwp->head.id;
3268 return 1;
3269}
3270
3271/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3272 start_step_over, if still there, and delete any reinsert
3273 breakpoints we've set, on non hardware single-step targets. */
3274
3275static int
3276finish_step_over (struct lwp_info *lwp)
3277{
3278 if (lwp->bp_reinsert != 0)
3279 {
3280 if (debug_threads)
3281 fprintf (stderr, "Finished step over.\n");
3282
3283 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3284 may be no breakpoint to reinsert there by now. */
3285 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 3286 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
3287
3288 lwp->bp_reinsert = 0;
3289
3290 /* Delete any software-single-step reinsert breakpoints. No
3291 longer needed. We don't have to worry about other threads
3292 hitting this trap, and later not being able to explain it,
3293 because we were stepping over a breakpoint, and we hold all
3294 threads but LWP stopped while doing that. */
3295 if (!can_hardware_single_step ())
3296 delete_reinsert_breakpoints ();
3297
3298 step_over_bkpt = null_ptid;
3299 return 1;
3300 }
3301 else
3302 return 0;
3303}
3304
5544ad89
DJ
3305/* This function is called once per thread. We check the thread's resume
3306 request, which will tell us whether to resume, step, or leave the thread
bd99dc85 3307 stopped; and what signal, if any, it should be sent.
5544ad89 3308
bd99dc85
PA
3309 For threads which we aren't explicitly told otherwise, we preserve
3310 the stepping flag; this is used for stepping over gdbserver-placed
3311 breakpoints.
3312
3313 If pending_flags was set in any thread, we queue any needed
3314 signals, since we won't actually resume. We already have a pending
3315 event to report, so we don't need to preserve any step requests;
3316 they should be re-issued if necessary. */
3317
3318static int
3319linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5544ad89 3320{
54a0b537 3321 struct lwp_info *lwp;
5544ad89 3322 struct thread_info *thread;
bd99dc85 3323 int step;
d50171e4
PA
3324 int leave_all_stopped = * (int *) arg;
3325 int leave_pending;
5544ad89
DJ
3326
3327 thread = (struct thread_info *) entry;
54a0b537 3328 lwp = get_thread_lwp (thread);
5544ad89 3329
2bd7c093 3330 if (lwp->resume == NULL)
bd99dc85 3331 return 0;
5544ad89 3332
bd99dc85 3333 if (lwp->resume->kind == resume_stop)
5544ad89 3334 {
bd99dc85 3335 if (debug_threads)
d50171e4 3336 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
bd99dc85
PA
3337
3338 if (!lwp->stopped)
3339 {
3340 if (debug_threads)
d50171e4 3341 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
bd99dc85 3342
d50171e4
PA
3343 /* Stop the thread, and wait for the event asynchronously,
3344 through the event loop. */
02fc4de7 3345 send_sigstop (lwp);
bd99dc85
PA
3346 }
3347 else
3348 {
3349 if (debug_threads)
d50171e4
PA
3350 fprintf (stderr, "already stopped LWP %ld\n",
3351 lwpid_of (lwp));
3352
3353 /* The LWP may have been stopped in an internal event that
3354 was not meant to be notified back to GDB (e.g., gdbserver
3355 breakpoint), so we should be reporting a stop event in
3356 this case too. */
3357
3358 /* If the thread already has a pending SIGSTOP, this is a
3359 no-op. Otherwise, something later will presumably resume
3360 the thread and this will cause it to cancel any pending
3361 operation, due to last_resume_kind == resume_stop. If
3362 the thread already has a pending status to report, we
3363 will still report it the next time we wait - see
3364 status_pending_p_callback. */
1a981360
PA
3365
3366 /* If we already have a pending signal to report, then
3367 there's no need to queue a SIGSTOP, as this means we're
3368 midway through moving the LWP out of the jumppad, and we
3369 will report the pending signal as soon as that is
3370 finished. */
3371 if (lwp->pending_signals_to_report == NULL)
3372 send_sigstop (lwp);
bd99dc85 3373 }
32ca6d61 3374
bd99dc85
PA
3375 /* For stop requests, we're done. */
3376 lwp->resume = NULL;
fc7238bb 3377 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3378 return 0;
5544ad89
DJ
3379 }
3380
bd99dc85
PA
3381 /* If this thread which is about to be resumed has a pending status,
3382 then don't resume any threads - we can just report the pending
3383 status. Make sure to queue any signals that would otherwise be
3384 sent. In all-stop mode, we do this decision based on if *any*
d50171e4
PA
3385 thread has a pending status. If there's a thread that needs the
3386 step-over-breakpoint dance, then don't resume any other thread
3387 but that particular one. */
3388 leave_pending = (lwp->status_pending_p || leave_all_stopped);
5544ad89 3389
d50171e4 3390 if (!leave_pending)
bd99dc85
PA
3391 {
3392 if (debug_threads)
3393 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
5544ad89 3394
d50171e4 3395 step = (lwp->resume->kind == resume_step);
2acc282a 3396 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
bd99dc85
PA
3397 }
3398 else
3399 {
3400 if (debug_threads)
3401 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
5544ad89 3402
bd99dc85
PA
3403 /* If we have a new signal, enqueue the signal. */
3404 if (lwp->resume->sig != 0)
3405 {
3406 struct pending_signals *p_sig;
3407 p_sig = xmalloc (sizeof (*p_sig));
3408 p_sig->prev = lwp->pending_signals;
3409 p_sig->signal = lwp->resume->sig;
3410 memset (&p_sig->info, 0, sizeof (siginfo_t));
3411
3412 /* If this is the same signal we were previously stopped by,
3413 make sure to queue its siginfo. We can ignore the return
3414 value of ptrace; if it fails, we'll skip
3415 PTRACE_SETSIGINFO. */
3416 if (WIFSTOPPED (lwp->last_status)
3417 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3418 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3419
3420 lwp->pending_signals = p_sig;
3421 }
3422 }
5544ad89 3423
fc7238bb 3424 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
bd99dc85 3425 lwp->resume = NULL;
5544ad89 3426 return 0;
0d62e5e8
DJ
3427}
3428
3429static void
2bd7c093 3430linux_resume (struct thread_resume *resume_info, size_t n)
0d62e5e8 3431{
2bd7c093 3432 struct thread_resume_array array = { resume_info, n };
d50171e4
PA
3433 struct lwp_info *need_step_over = NULL;
3434 int any_pending;
3435 int leave_all_stopped;
c6ecbae5 3436
2bd7c093 3437 find_inferior (&all_threads, linux_set_resume_request, &array);
5544ad89 3438
d50171e4
PA
3439 /* If there is a thread which would otherwise be resumed, which has
3440 a pending status, then don't resume any threads - we can just
3441 report the pending status. Make sure to queue any signals that
3442 would otherwise be sent. In non-stop mode, we'll apply this
3443 logic to each thread individually. We consume all pending events
3444 before considering to start a step-over (in all-stop). */
3445 any_pending = 0;
bd99dc85 3446 if (!non_stop)
d50171e4
PA
3447 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3448
3449 /* If there is a thread which would otherwise be resumed, which is
3450 stopped at a breakpoint that needs stepping over, then don't
3451 resume any threads - have it step over the breakpoint with all
3452 other threads stopped, then resume all threads again. Make sure
3453 to queue any signals that would otherwise be delivered or
3454 queued. */
3455 if (!any_pending && supports_breakpoints ())
3456 need_step_over
3457 = (struct lwp_info *) find_inferior (&all_lwps,
3458 need_step_over_p, NULL);
3459
3460 leave_all_stopped = (need_step_over != NULL || any_pending);
3461
3462 if (debug_threads)
3463 {
3464 if (need_step_over != NULL)
3465 fprintf (stderr, "Not resuming all, need step over\n");
3466 else if (any_pending)
3467 fprintf (stderr,
3468 "Not resuming, all-stop and found "
3469 "an LWP with pending status\n");
3470 else
3471 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3472 }
3473
3474 /* Even if we're leaving threads stopped, queue all signals we'd
3475 otherwise deliver. */
3476 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3477
3478 if (need_step_over)
3479 start_step_over (need_step_over);
3480}
3481
3482/* This function is called once per thread. We check the thread's
3483 last resume request, which will tell us whether to resume, step, or
3484 leave the thread stopped. Any signal the client requested to be
3485 delivered has already been enqueued at this point.
3486
3487 If any thread that GDB wants running is stopped at an internal
3488 breakpoint that needs stepping over, we start a step-over operation
3489 on that particular thread, and leave all others stopped. */
3490
7984d532
PA
3491static int
3492proceed_one_lwp (struct inferior_list_entry *entry, void *except)
d50171e4 3493{
7984d532 3494 struct lwp_info *lwp = (struct lwp_info *) entry;
8336d594 3495 struct thread_info *thread;
d50171e4
PA
3496 int step;
3497
7984d532
PA
3498 if (lwp == except)
3499 return 0;
d50171e4
PA
3500
3501 if (debug_threads)
3502 fprintf (stderr,
3503 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3504
3505 if (!lwp->stopped)
3506 {
3507 if (debug_threads)
3508 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
7984d532 3509 return 0;
d50171e4
PA
3510 }
3511
8336d594
PA
3512 thread = get_lwp_thread (lwp);
3513
02fc4de7
PA
3514 if (thread->last_resume_kind == resume_stop
3515 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
d50171e4
PA
3516 {
3517 if (debug_threads)
02fc4de7
PA
3518 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3519 lwpid_of (lwp));
7984d532 3520 return 0;
d50171e4
PA
3521 }
3522
3523 if (lwp->status_pending_p)
3524 {
3525 if (debug_threads)
3526 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3527 lwpid_of (lwp));
7984d532 3528 return 0;
d50171e4
PA
3529 }
3530
7984d532
PA
3531 gdb_assert (lwp->suspended >= 0);
3532
d50171e4
PA
3533 if (lwp->suspended)
3534 {
3535 if (debug_threads)
3536 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
7984d532 3537 return 0;
d50171e4
PA
3538 }
3539
1a981360
PA
3540 if (thread->last_resume_kind == resume_stop
3541 && lwp->pending_signals_to_report == NULL
3542 && lwp->collecting_fast_tracepoint == 0)
02fc4de7
PA
3543 {
3544 /* We haven't reported this LWP as stopped yet (otherwise, the
3545 last_status.kind check above would catch it, and we wouldn't
3546 reach here. This LWP may have been momentarily paused by a
3547 stop_all_lwps call while handling for example, another LWP's
3548 step-over. In that case, the pending expected SIGSTOP signal
3549 that was queued at vCont;t handling time will have already
3550 been consumed by wait_for_sigstop, and so we need to requeue
3551 another one here. Note that if the LWP already has a SIGSTOP
3552 pending, this is a no-op. */
3553
3554 if (debug_threads)
3555 fprintf (stderr,
3556 "Client wants LWP %ld to stop. "
3557 "Making sure it has a SIGSTOP pending\n",
3558 lwpid_of (lwp));
3559
3560 send_sigstop (lwp);
3561 }
3562
8336d594 3563 step = thread->last_resume_kind == resume_step;
d50171e4 3564 linux_resume_one_lwp (lwp, step, 0, NULL);
7984d532
PA
3565 return 0;
3566}
3567
3568static int
3569unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3570{
3571 struct lwp_info *lwp = (struct lwp_info *) entry;
3572
3573 if (lwp == except)
3574 return 0;
3575
3576 lwp->suspended--;
3577 gdb_assert (lwp->suspended >= 0);
3578
3579 return proceed_one_lwp (entry, except);
d50171e4
PA
3580}
3581
3582/* When we finish a step-over, set threads running again. If there's
3583 another thread that may need a step-over, now's the time to start
3584 it. Eventually, we'll move all threads past their breakpoints. */
3585
3586static void
3587proceed_all_lwps (void)
3588{
3589 struct lwp_info *need_step_over;
3590
3591 /* If there is a thread which would otherwise be resumed, which is
3592 stopped at a breakpoint that needs stepping over, then don't
3593 resume any threads - have it step over the breakpoint with all
3594 other threads stopped, then resume all threads again. */
3595
3596 if (supports_breakpoints ())
3597 {
3598 need_step_over
3599 = (struct lwp_info *) find_inferior (&all_lwps,
3600 need_step_over_p, NULL);
3601
3602 if (need_step_over != NULL)
3603 {
3604 if (debug_threads)
3605 fprintf (stderr, "proceed_all_lwps: found "
3606 "thread %ld needing a step-over\n",
3607 lwpid_of (need_step_over));
3608
3609 start_step_over (need_step_over);
3610 return;
3611 }
3612 }
5544ad89 3613
d50171e4
PA
3614 if (debug_threads)
3615 fprintf (stderr, "Proceeding, no step-over needed\n");
3616
7984d532 3617 find_inferior (&all_lwps, proceed_one_lwp, NULL);
d50171e4
PA
3618}
3619
3620/* Stopped LWPs that the client wanted to be running, that don't have
3621 pending statuses, are set to run again, except for EXCEPT, if not
3622 NULL. This undoes a stop_all_lwps call. */
3623
3624static void
7984d532 3625unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 3626{
5544ad89
DJ
3627 if (debug_threads)
3628 {
d50171e4
PA
3629 if (except)
3630 fprintf (stderr,
3631 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
5544ad89 3632 else
d50171e4
PA
3633 fprintf (stderr,
3634 "unstopping all lwps\n");
5544ad89
DJ
3635 }
3636
7984d532
PA
3637 if (unsuspend)
3638 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3639 else
3640 find_inferior (&all_lwps, proceed_one_lwp, except);
0d62e5e8
DJ
3641}
3642
3643#ifdef HAVE_LINUX_USRREGS
da6d8c04
DJ
3644
3645int
0a30fbc4 3646register_addr (int regnum)
da6d8c04
DJ
3647{
3648 int addr;
3649
2ec06d2e 3650 if (regnum < 0 || regnum >= the_low_target.num_regs)
da6d8c04
DJ
3651 error ("Invalid register number %d.", regnum);
3652
2ec06d2e 3653 addr = the_low_target.regmap[regnum];
da6d8c04
DJ
3654
3655 return addr;
3656}
3657
58caa3dc 3658/* Fetch one register. */
da6d8c04 3659static void
442ea881 3660fetch_register (struct regcache *regcache, int regno)
da6d8c04
DJ
3661{
3662 CORE_ADDR regaddr;
48d93c75 3663 int i, size;
0d62e5e8 3664 char *buf;
95954743 3665 int pid;
da6d8c04 3666
2ec06d2e 3667 if (regno >= the_low_target.num_regs)
0a30fbc4 3668 return;
2ec06d2e 3669 if ((*the_low_target.cannot_fetch_register) (regno))
0a30fbc4 3670 return;
da6d8c04 3671
0a30fbc4
DJ
3672 regaddr = register_addr (regno);
3673 if (regaddr == -1)
3674 return;
95954743
PA
3675
3676 pid = lwpid_of (get_thread_lwp (current_inferior));
1b3f6016
PA
3677 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3678 & - sizeof (PTRACE_XFER_TYPE));
48d93c75
UW
3679 buf = alloca (size);
3680 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04
DJ
3681 {
3682 errno = 0;
0d62e5e8 3683 *(PTRACE_XFER_TYPE *) (buf + i) =
14ce3065
DE
3684 ptrace (PTRACE_PEEKUSER, pid,
3685 /* Coerce to a uintptr_t first to avoid potential gcc warning
3686 of coercing an 8 byte integer to a 4 byte pointer. */
3687 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
da6d8c04
DJ
3688 regaddr += sizeof (PTRACE_XFER_TYPE);
3689 if (errno != 0)
f52cd8cd 3690 error ("reading register %d: %s", regno, strerror (errno));
da6d8c04 3691 }
ee1a7ae4
UW
3692
3693 if (the_low_target.supply_ptrace_register)
442ea881 3694 the_low_target.supply_ptrace_register (regcache, regno, buf);
5a1f5858 3695 else
442ea881 3696 supply_register (regcache, regno, buf);
da6d8c04
DJ
3697}
3698
3699/* Fetch all registers, or just one, from the child process. */
58caa3dc 3700static void
442ea881 3701usr_fetch_inferior_registers (struct regcache *regcache, int regno)
da6d8c04 3702{
4463ce24 3703 if (regno == -1)
2ec06d2e 3704 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3705 fetch_register (regcache, regno);
da6d8c04 3706 else
442ea881 3707 fetch_register (regcache, regno);
da6d8c04
DJ
3708}
3709
3710/* Store our register values back into the inferior.
3711 If REGNO is -1, do this for all registers.
3712 Otherwise, REGNO specifies which register (so we can save time). */
58caa3dc 3713static void
442ea881 3714usr_store_inferior_registers (struct regcache *regcache, int regno)
da6d8c04
DJ
3715{
3716 CORE_ADDR regaddr;
48d93c75 3717 int i, size;
0d62e5e8 3718 char *buf;
55ac2b99 3719 int pid;
da6d8c04
DJ
3720
3721 if (regno >= 0)
3722 {
2ec06d2e 3723 if (regno >= the_low_target.num_regs)
0a30fbc4
DJ
3724 return;
3725
bc1e36ca 3726 if ((*the_low_target.cannot_store_register) (regno) == 1)
0a30fbc4
DJ
3727 return;
3728
3729 regaddr = register_addr (regno);
3730 if (regaddr == -1)
da6d8c04 3731 return;
da6d8c04 3732 errno = 0;
48d93c75
UW
3733 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3734 & - sizeof (PTRACE_XFER_TYPE);
3735 buf = alloca (size);
3736 memset (buf, 0, size);
ee1a7ae4
UW
3737
3738 if (the_low_target.collect_ptrace_register)
442ea881 3739 the_low_target.collect_ptrace_register (regcache, regno, buf);
5a1f5858 3740 else
442ea881 3741 collect_register (regcache, regno, buf);
ee1a7ae4 3742
95954743 3743 pid = lwpid_of (get_thread_lwp (current_inferior));
48d93c75 3744 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
da6d8c04 3745 {
0a30fbc4 3746 errno = 0;
14ce3065
DE
3747 ptrace (PTRACE_POKEUSER, pid,
3748 /* Coerce to a uintptr_t first to avoid potential gcc warning
3749 about coercing an 8 byte integer to a 4 byte pointer. */
3750 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3751 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
da6d8c04
DJ
3752 if (errno != 0)
3753 {
1b3f6016
PA
3754 /* At this point, ESRCH should mean the process is
3755 already gone, in which case we simply ignore attempts
3756 to change its registers. See also the related
3757 comment in linux_resume_one_lwp. */
3221518c
UW
3758 if (errno == ESRCH)
3759 return;
3760
bc1e36ca 3761 if ((*the_low_target.cannot_store_register) (regno) == 0)
f52cd8cd 3762 error ("writing register %d: %s", regno, strerror (errno));
da6d8c04 3763 }
2ff29de4 3764 regaddr += sizeof (PTRACE_XFER_TYPE);
da6d8c04 3765 }
da6d8c04
DJ
3766 }
3767 else
2ec06d2e 3768 for (regno = 0; regno < the_low_target.num_regs; regno++)
442ea881 3769 usr_store_inferior_registers (regcache, regno);
da6d8c04 3770}
58caa3dc
DJ
3771#endif /* HAVE_LINUX_USRREGS */
3772
3773
3774
3775#ifdef HAVE_LINUX_REGSETS
3776
3777static int
442ea881 3778regsets_fetch_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3779{
3780 struct regset_info *regset;
e9d25b98 3781 int saw_general_regs = 0;
95954743 3782 int pid;
1570b33e 3783 struct iovec iov;
58caa3dc
DJ
3784
3785 regset = target_regsets;
3786
95954743 3787 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3788 while (regset->size >= 0)
3789 {
1570b33e
L
3790 void *buf, *data;
3791 int nt_type, res;
58caa3dc 3792
52fa2412 3793 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3794 {
3795 regset ++;
3796 continue;
3797 }
3798
bca929d3 3799 buf = xmalloc (regset->size);
1570b33e
L
3800
3801 nt_type = regset->nt_type;
3802 if (nt_type)
3803 {
3804 iov.iov_base = buf;
3805 iov.iov_len = regset->size;
3806 data = (void *) &iov;
3807 }
3808 else
3809 data = buf;
3810
dfb64f85 3811#ifndef __sparc__
1570b33e 3812 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3813#else
1570b33e 3814 res = ptrace (regset->get_request, pid, data, nt_type);
dfb64f85 3815#endif
58caa3dc
DJ
3816 if (res < 0)
3817 {
3818 if (errno == EIO)
3819 {
52fa2412
UW
3820 /* If we get EIO on a regset, do not try it again for
3821 this process. */
3822 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3823 free (buf);
52fa2412 3824 continue;
58caa3dc
DJ
3825 }
3826 else
3827 {
0d62e5e8 3828 char s[256];
95954743
PA
3829 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3830 pid);
0d62e5e8 3831 perror (s);
58caa3dc
DJ
3832 }
3833 }
e9d25b98
DJ
3834 else if (regset->type == GENERAL_REGS)
3835 saw_general_regs = 1;
442ea881 3836 regset->store_function (regcache, buf);
58caa3dc 3837 regset ++;
fdeb2a12 3838 free (buf);
58caa3dc 3839 }
e9d25b98
DJ
3840 if (saw_general_regs)
3841 return 0;
3842 else
3843 return 1;
58caa3dc
DJ
3844}
3845
3846static int
442ea881 3847regsets_store_inferior_registers (struct regcache *regcache)
58caa3dc
DJ
3848{
3849 struct regset_info *regset;
e9d25b98 3850 int saw_general_regs = 0;
95954743 3851 int pid;
1570b33e 3852 struct iovec iov;
58caa3dc
DJ
3853
3854 regset = target_regsets;
3855
95954743 3856 pid = lwpid_of (get_thread_lwp (current_inferior));
58caa3dc
DJ
3857 while (regset->size >= 0)
3858 {
1570b33e
L
3859 void *buf, *data;
3860 int nt_type, res;
58caa3dc 3861
52fa2412 3862 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
58caa3dc
DJ
3863 {
3864 regset ++;
3865 continue;
3866 }
3867
bca929d3 3868 buf = xmalloc (regset->size);
545587ee
DJ
3869
3870 /* First fill the buffer with the current register set contents,
3871 in case there are any items in the kernel's regset that are
3872 not in gdbserver's regcache. */
1570b33e
L
3873
3874 nt_type = regset->nt_type;
3875 if (nt_type)
3876 {
3877 iov.iov_base = buf;
3878 iov.iov_len = regset->size;
3879 data = (void *) &iov;
3880 }
3881 else
3882 data = buf;
3883
dfb64f85 3884#ifndef __sparc__
1570b33e 3885 res = ptrace (regset->get_request, pid, nt_type, data);
dfb64f85 3886#else
1570b33e 3887 res = ptrace (regset->get_request, pid, &iov, data);
dfb64f85 3888#endif
545587ee
DJ
3889
3890 if (res == 0)
3891 {
3892 /* Then overlay our cached registers on that. */
442ea881 3893 regset->fill_function (regcache, buf);
545587ee
DJ
3894
3895 /* Only now do we write the register set. */
dfb64f85 3896#ifndef __sparc__
1570b33e 3897 res = ptrace (regset->set_request, pid, nt_type, data);
dfb64f85 3898#else
1570b33e 3899 res = ptrace (regset->set_request, pid, data, nt_type);
dfb64f85 3900#endif
545587ee
DJ
3901 }
3902
58caa3dc
DJ
3903 if (res < 0)
3904 {
3905 if (errno == EIO)
3906 {
52fa2412
UW
3907 /* If we get EIO on a regset, do not try it again for
3908 this process. */
3909 disabled_regsets[regset - target_regsets] = 1;
fdeb2a12 3910 free (buf);
52fa2412 3911 continue;
58caa3dc 3912 }
3221518c
UW
3913 else if (errno == ESRCH)
3914 {
1b3f6016
PA
3915 /* At this point, ESRCH should mean the process is
3916 already gone, in which case we simply ignore attempts
3917 to change its registers. See also the related
3918 comment in linux_resume_one_lwp. */
fdeb2a12 3919 free (buf);
3221518c
UW
3920 return 0;
3921 }
58caa3dc
DJ
3922 else
3923 {
ce3a066d 3924 perror ("Warning: ptrace(regsets_store_inferior_registers)");
58caa3dc
DJ
3925 }
3926 }
e9d25b98
DJ
3927 else if (regset->type == GENERAL_REGS)
3928 saw_general_regs = 1;
58caa3dc 3929 regset ++;
09ec9b38 3930 free (buf);
58caa3dc 3931 }
e9d25b98
DJ
3932 if (saw_general_regs)
3933 return 0;
3934 else
3935 return 1;
ce3a066d 3936 return 0;
58caa3dc
DJ
3937}
3938
3939#endif /* HAVE_LINUX_REGSETS */
3940
3941
3942void
442ea881 3943linux_fetch_registers (struct regcache *regcache, int regno)
58caa3dc
DJ
3944{
3945#ifdef HAVE_LINUX_REGSETS
442ea881 3946 if (regsets_fetch_inferior_registers (regcache) == 0)
52fa2412 3947 return;
58caa3dc
DJ
3948#endif
3949#ifdef HAVE_LINUX_USRREGS
442ea881 3950 usr_fetch_inferior_registers (regcache, regno);
58caa3dc
DJ
3951#endif
3952}
3953
3954void
442ea881 3955linux_store_registers (struct regcache *regcache, int regno)
58caa3dc
DJ
3956{
3957#ifdef HAVE_LINUX_REGSETS
442ea881 3958 if (regsets_store_inferior_registers (regcache) == 0)
52fa2412 3959 return;
58caa3dc
DJ
3960#endif
3961#ifdef HAVE_LINUX_USRREGS
442ea881 3962 usr_store_inferior_registers (regcache, regno);
58caa3dc
DJ
3963#endif
3964}
3965
da6d8c04 3966
da6d8c04
DJ
3967/* Copy LEN bytes from inferior's memory starting at MEMADDR
3968 to debugger memory starting at MYADDR. */
3969
c3e735a6 3970static int
f450004a 3971linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
da6d8c04
DJ
3972{
3973 register int i;
3974 /* Round starting address down to longword boundary. */
3975 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3976 /* Round ending address up; get number of longwords that makes. */
aa691b87
RM
3977 register int count
3978 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
da6d8c04
DJ
3979 / sizeof (PTRACE_XFER_TYPE);
3980 /* Allocate buffer of that many longwords. */
aa691b87 3981 register PTRACE_XFER_TYPE *buffer
da6d8c04 3982 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
fd462a61
DJ
3983 int fd;
3984 char filename[64];
95954743 3985 int pid = lwpid_of (get_thread_lwp (current_inferior));
fd462a61
DJ
3986
3987 /* Try using /proc. Don't bother for one word. */
3988 if (len >= 3 * sizeof (long))
3989 {
3990 /* We could keep this file open and cache it - possibly one per
3991 thread. That requires some juggling, but is even faster. */
95954743 3992 sprintf (filename, "/proc/%d/mem", pid);
fd462a61
DJ
3993 fd = open (filename, O_RDONLY | O_LARGEFILE);
3994 if (fd == -1)
3995 goto no_proc;
3996
3997 /* If pread64 is available, use it. It's faster if the kernel
3998 supports it (only one syscall), and it's 64-bit safe even on
3999 32-bit platforms (for instance, SPARC debugging a SPARC64
4000 application). */
4001#ifdef HAVE_PREAD64
4002 if (pread64 (fd, myaddr, len, memaddr) != len)
4003#else
1de1badb 4004 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
fd462a61
DJ
4005#endif
4006 {
4007 close (fd);
4008 goto no_proc;
4009 }
4010
4011 close (fd);
4012 return 0;
4013 }
da6d8c04 4014
fd462a61 4015 no_proc:
da6d8c04
DJ
4016 /* Read all the longwords */
4017 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4018 {
c3e735a6 4019 errno = 0;
14ce3065
DE
4020 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4021 about coercing an 8 byte integer to a 4 byte pointer. */
4022 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4023 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
c3e735a6
DJ
4024 if (errno)
4025 return errno;
da6d8c04
DJ
4026 }
4027
4028 /* Copy appropriate bytes out of the buffer. */
1b3f6016
PA
4029 memcpy (myaddr,
4030 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4031 len);
c3e735a6
DJ
4032
4033 return 0;
da6d8c04
DJ
4034}
4035
93ae6fdc
PA
4036/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4037 memory at MEMADDR. On failure (cannot write to the inferior)
da6d8c04
DJ
4038 returns the value of errno. */
4039
ce3a066d 4040static int
f450004a 4041linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
4042{
4043 register int i;
4044 /* Round starting address down to longword boundary. */
4045 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4046 /* Round ending address up; get number of longwords that makes. */
4047 register int count
4048 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
4049 /* Allocate buffer of that many longwords. */
4050 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
95954743 4051 int pid = lwpid_of (get_thread_lwp (current_inferior));
da6d8c04 4052
0d62e5e8
DJ
4053 if (debug_threads)
4054 {
58d6951d
DJ
4055 /* Dump up to four bytes. */
4056 unsigned int val = * (unsigned int *) myaddr;
4057 if (len == 1)
4058 val = val & 0xff;
4059 else if (len == 2)
4060 val = val & 0xffff;
4061 else if (len == 3)
4062 val = val & 0xffffff;
4063 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4064 val, (long)memaddr);
0d62e5e8
DJ
4065 }
4066
da6d8c04
DJ
4067 /* Fill start and end extra bytes of buffer with existing memory data. */
4068
93ae6fdc 4069 errno = 0;
14ce3065
DE
4070 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4071 about coercing an 8 byte integer to a 4 byte pointer. */
4072 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4073 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
93ae6fdc
PA
4074 if (errno)
4075 return errno;
da6d8c04
DJ
4076
4077 if (count > 1)
4078 {
93ae6fdc 4079 errno = 0;
da6d8c04 4080 buffer[count - 1]
95954743 4081 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
4082 /* Coerce to a uintptr_t first to avoid potential gcc warning
4083 about coercing an 8 byte integer to a 4 byte pointer. */
4084 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4085 * sizeof (PTRACE_XFER_TYPE)),
d844cde6 4086 0);
93ae6fdc
PA
4087 if (errno)
4088 return errno;
da6d8c04
DJ
4089 }
4090
93ae6fdc 4091 /* Copy data to be written over corresponding part of buffer. */
da6d8c04
DJ
4092
4093 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
4094
4095 /* Write the entire buffer. */
4096
4097 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4098 {
4099 errno = 0;
14ce3065
DE
4100 ptrace (PTRACE_POKETEXT, pid,
4101 /* Coerce to a uintptr_t first to avoid potential gcc warning
4102 about coercing an 8 byte integer to a 4 byte pointer. */
4103 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4104 (PTRACE_ARG4_TYPE) buffer[i]);
da6d8c04
DJ
4105 if (errno)
4106 return errno;
4107 }
4108
4109 return 0;
4110}
2f2893d9 4111
6076632b 4112/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
24a09b5f
DJ
4113static int linux_supports_tracefork_flag;
4114
1e7fc18c
PA
4115static void
4116linux_enable_event_reporting (int pid)
4117{
4118 if (!linux_supports_tracefork_flag)
4119 return;
4120
4121 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4122}
4123
51c2684e 4124/* Helper functions for linux_test_for_tracefork, called via clone (). */
24a09b5f 4125
51c2684e
DJ
4126static int
4127linux_tracefork_grandchild (void *arg)
4128{
4129 _exit (0);
4130}
4131
7407e2de
AS
4132#define STACK_SIZE 4096
4133
51c2684e
DJ
4134static int
4135linux_tracefork_child (void *arg)
24a09b5f
DJ
4136{
4137 ptrace (PTRACE_TRACEME, 0, 0, 0);
4138 kill (getpid (), SIGSTOP);
e4b7f41c
JK
4139
4140#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4141
4142 if (fork () == 0)
4143 linux_tracefork_grandchild (NULL);
4144
4145#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4146
7407e2de
AS
4147#ifdef __ia64__
4148 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4149 CLONE_VM | SIGCHLD, NULL);
4150#else
4151 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
4152 CLONE_VM | SIGCHLD, NULL);
4153#endif
e4b7f41c
JK
4154
4155#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4156
24a09b5f
DJ
4157 _exit (0);
4158}
4159
24a09b5f
DJ
4160/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4161 sure that we can enable the option, and that it had the desired
4162 effect. */
4163
4164static void
4165linux_test_for_tracefork (void)
4166{
4167 int child_pid, ret, status;
4168 long second_pid;
e4b7f41c 4169#if defined(__UCLIBC__) && defined(HAS_NOMMU)
bca929d3 4170 char *stack = xmalloc (STACK_SIZE * 4);
e4b7f41c 4171#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
4172
4173 linux_supports_tracefork_flag = 0;
4174
e4b7f41c
JK
4175#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4176
4177 child_pid = fork ();
4178 if (child_pid == 0)
4179 linux_tracefork_child (NULL);
4180
4181#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4182
51c2684e 4183 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
7407e2de
AS
4184#ifdef __ia64__
4185 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4186 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c 4187#else /* !__ia64__ */
7407e2de
AS
4188 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4189 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
e4b7f41c
JK
4190#endif /* !__ia64__ */
4191
4192#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4193
24a09b5f 4194 if (child_pid == -1)
51c2684e 4195 perror_with_name ("clone");
24a09b5f
DJ
4196
4197 ret = my_waitpid (child_pid, &status, 0);
4198 if (ret == -1)
4199 perror_with_name ("waitpid");
4200 else if (ret != child_pid)
4201 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4202 if (! WIFSTOPPED (status))
4203 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4204
14ce3065
DE
4205 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4206 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
24a09b5f
DJ
4207 if (ret != 0)
4208 {
4209 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4210 if (ret != 0)
4211 {
4212 warning ("linux_test_for_tracefork: failed to kill child");
4213 return;
4214 }
4215
4216 ret = my_waitpid (child_pid, &status, 0);
4217 if (ret != child_pid)
4218 warning ("linux_test_for_tracefork: failed to wait for killed child");
4219 else if (!WIFSIGNALED (status))
4220 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4221 "killed child", status);
4222
4223 return;
4224 }
4225
4226 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4227 if (ret != 0)
4228 warning ("linux_test_for_tracefork: failed to resume child");
4229
4230 ret = my_waitpid (child_pid, &status, 0);
4231
4232 if (ret == child_pid && WIFSTOPPED (status)
4233 && status >> 16 == PTRACE_EVENT_FORK)
4234 {
4235 second_pid = 0;
4236 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4237 if (ret == 0 && second_pid != 0)
4238 {
4239 int second_status;
4240
4241 linux_supports_tracefork_flag = 1;
4242 my_waitpid (second_pid, &second_status, 0);
4243 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4244 if (ret != 0)
4245 warning ("linux_test_for_tracefork: failed to kill second child");
4246 my_waitpid (second_pid, &status, 0);
4247 }
4248 }
4249 else
4250 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4251 "(%d, status 0x%x)", ret, status);
4252
4253 do
4254 {
4255 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4256 if (ret != 0)
4257 warning ("linux_test_for_tracefork: failed to kill child");
4258 my_waitpid (child_pid, &status, 0);
4259 }
4260 while (WIFSTOPPED (status));
51c2684e 4261
e4b7f41c 4262#if defined(__UCLIBC__) && defined(HAS_NOMMU)
51c2684e 4263 free (stack);
e4b7f41c 4264#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
24a09b5f
DJ
4265}
4266
4267
2f2893d9
DJ
4268static void
4269linux_look_up_symbols (void)
4270{
0d62e5e8 4271#ifdef USE_THREAD_DB
95954743
PA
4272 struct process_info *proc = current_process ();
4273
cdbfd419 4274 if (proc->private->thread_db != NULL)
0d62e5e8
DJ
4275 return;
4276
6076632b
DE
4277 /* If the kernel supports tracing forks then it also supports tracing
4278 clones, and then we don't need to use the magic thread event breakpoint
4279 to learn about threads. */
cdbfd419 4280 thread_db_init (!linux_supports_tracefork_flag);
0d62e5e8
DJ
4281#endif
4282}
4283
e5379b03 4284static void
ef57601b 4285linux_request_interrupt (void)
e5379b03 4286{
a1928bad 4287 extern unsigned long signal_pid;
e5379b03 4288
95954743
PA
4289 if (!ptid_equal (cont_thread, null_ptid)
4290 && !ptid_equal (cont_thread, minus_one_ptid))
e5379b03 4291 {
54a0b537 4292 struct lwp_info *lwp;
bd99dc85 4293 int lwpid;
e5379b03 4294
54a0b537 4295 lwp = get_thread_lwp (current_inferior);
bd99dc85
PA
4296 lwpid = lwpid_of (lwp);
4297 kill_lwp (lwpid, SIGINT);
e5379b03
DJ
4298 }
4299 else
ef57601b 4300 kill_lwp (signal_pid, SIGINT);
e5379b03
DJ
4301}
4302
aa691b87
RM
4303/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4304 to debugger memory starting at MYADDR. */
4305
4306static int
f450004a 4307linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
aa691b87
RM
4308{
4309 char filename[PATH_MAX];
4310 int fd, n;
95954743 4311 int pid = lwpid_of (get_thread_lwp (current_inferior));
aa691b87 4312
95954743 4313 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
aa691b87
RM
4314
4315 fd = open (filename, O_RDONLY);
4316 if (fd < 0)
4317 return -1;
4318
4319 if (offset != (CORE_ADDR) 0
4320 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4321 n = -1;
4322 else
4323 n = read (fd, myaddr, len);
4324
4325 close (fd);
4326
4327 return n;
4328}
4329
d993e290
PA
4330/* These breakpoint and watchpoint related wrapper functions simply
4331 pass on the function call if the target has registered a
4332 corresponding function. */
e013ee27
OF
4333
4334static int
d993e290 4335linux_insert_point (char type, CORE_ADDR addr, int len)
e013ee27 4336{
d993e290
PA
4337 if (the_low_target.insert_point != NULL)
4338 return the_low_target.insert_point (type, addr, len);
e013ee27
OF
4339 else
4340 /* Unsupported (see target.h). */
4341 return 1;
4342}
4343
4344static int
d993e290 4345linux_remove_point (char type, CORE_ADDR addr, int len)
e013ee27 4346{
d993e290
PA
4347 if (the_low_target.remove_point != NULL)
4348 return the_low_target.remove_point (type, addr, len);
e013ee27
OF
4349 else
4350 /* Unsupported (see target.h). */
4351 return 1;
4352}
4353
4354static int
4355linux_stopped_by_watchpoint (void)
4356{
c3adc08c
PA
4357 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4358
4359 return lwp->stopped_by_watchpoint;
e013ee27
OF
4360}
4361
4362static CORE_ADDR
4363linux_stopped_data_address (void)
4364{
c3adc08c
PA
4365 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4366
4367 return lwp->stopped_data_address;
e013ee27
OF
4368}
4369
42c81e2a 4370#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437
NS
4371#if defined(__mcoldfire__)
4372/* These should really be defined in the kernel's ptrace.h header. */
4373#define PT_TEXT_ADDR 49*4
4374#define PT_DATA_ADDR 50*4
4375#define PT_TEXT_END_ADDR 51*4
4376#endif
4377
4378/* Under uClinux, programs are loaded at non-zero offsets, which we need
4379 to tell gdb about. */
4380
4381static int
4382linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4383{
4384#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4385 unsigned long text, text_end, data;
bd99dc85 4386 int pid = lwpid_of (get_thread_lwp (current_inferior));
52fb6437
NS
4387
4388 errno = 0;
4389
4390 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4391 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4392 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4393
4394 if (errno == 0)
4395 {
4396 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
4397 used by gdb) are relative to the beginning of the program,
4398 with the data segment immediately following the text segment.
4399 However, the actual runtime layout in memory may put the data
4400 somewhere else, so when we send gdb a data base-address, we
4401 use the real data base address and subtract the compile-time
4402 data base-address from it (which is just the length of the
4403 text segment). BSS immediately follows data in both
4404 cases. */
52fb6437
NS
4405 *text_p = text;
4406 *data_p = data - (text_end - text);
1b3f6016 4407
52fb6437
NS
4408 return 1;
4409 }
4410#endif
4411 return 0;
4412}
4413#endif
4414
dc146f7c
VP
4415static int
4416compare_ints (const void *xa, const void *xb)
4417{
4418 int a = *(const int *)xa;
4419 int b = *(const int *)xb;
4420
4421 return a - b;
4422}
4423
4424static int *
4425unique (int *b, int *e)
4426{
4427 int *d = b;
4428 while (++b != e)
4429 if (*d != *b)
4430 *++d = *b;
4431 return ++d;
4432}
4433
4434/* Given PID, iterates over all threads in that process.
4435
4436 Information about each thread, in a format suitable for qXfer:osdata:thread
4437 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
4438 initialized, and the caller is responsible for finishing and appending '\0'
4439 to it.
4440
4441 The list of cores that threads are running on is assigned to *CORES, if it
4442 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
4443 should free *CORES. */
4444
4445static void
4446list_threads (int pid, struct buffer *buffer, char **cores)
4447{
4448 int count = 0;
4449 int allocated = 10;
4450 int *core_numbers = xmalloc (sizeof (int) * allocated);
4451 char pathname[128];
4452 DIR *dir;
4453 struct dirent *dp;
4454 struct stat statbuf;
4455
4456 sprintf (pathname, "/proc/%d/task", pid);
4457 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
4458 {
4459 dir = opendir (pathname);
4460 if (!dir)
4461 {
4462 free (core_numbers);
4463 return;
4464 }
4465
4466 while ((dp = readdir (dir)) != NULL)
4467 {
4468 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
4469
4470 if (lwp != 0)
4471 {
4472 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
4473
4474 if (core != -1)
4475 {
4476 char s[sizeof ("4294967295")];
4477 sprintf (s, "%u", core);
4478
4479 if (count == allocated)
4480 {
4481 allocated *= 2;
4482 core_numbers = realloc (core_numbers,
4483 sizeof (int) * allocated);
4484 }
4485 core_numbers[count++] = core;
4486 if (buffer)
4487 buffer_xml_printf (buffer,
4488 "<item>"
4489 "<column name=\"pid\">%d</column>"
4490 "<column name=\"tid\">%s</column>"
4491 "<column name=\"core\">%s</column>"
4492 "</item>", pid, dp->d_name, s);
4493 }
4494 else
4495 {
4496 if (buffer)
4497 buffer_xml_printf (buffer,
4498 "<item>"
4499 "<column name=\"pid\">%d</column>"
4500 "<column name=\"tid\">%s</column>"
4501 "</item>", pid, dp->d_name);
4502 }
4503 }
4504 }
4505 }
4506
4507 if (cores)
4508 {
4509 *cores = NULL;
4510 if (count > 0)
4511 {
4512 struct buffer buffer2;
4513 int *b;
4514 int *e;
4515 qsort (core_numbers, count, sizeof (int), compare_ints);
4516
4517 /* Remove duplicates. */
4518 b = core_numbers;
4519 e = unique (b, core_numbers + count);
4520
4521 buffer_init (&buffer2);
4522
4523 for (b = core_numbers; b != e; ++b)
4524 {
4525 char number[sizeof ("4294967295")];
4526 sprintf (number, "%u", *b);
4527 buffer_xml_printf (&buffer2, "%s%s",
4528 (b == core_numbers) ? "" : ",", number);
4529 }
4530 buffer_grow_str0 (&buffer2, "");
4531
4532 *cores = buffer_finish (&buffer2);
4533 }
4534 }
4535 free (core_numbers);
4536}
4537
4538static void
4539show_process (int pid, const char *username, struct buffer *buffer)
4540{
4541 char pathname[128];
4542 FILE *f;
4543 char cmd[MAXPATHLEN + 1];
4544
4545 sprintf (pathname, "/proc/%d/cmdline", pid);
4546
4547 if ((f = fopen (pathname, "r")) != NULL)
4548 {
4549 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
4550 if (len > 0)
4551 {
4552 char *cores = 0;
4553 int i;
4554 for (i = 0; i < len; i++)
4555 if (cmd[i] == '\0')
4556 cmd[i] = ' ';
4557 cmd[len] = '\0';
4558
4559 buffer_xml_printf (buffer,
4560 "<item>"
4561 "<column name=\"pid\">%d</column>"
4562 "<column name=\"user\">%s</column>"
4563 "<column name=\"command\">%s</column>",
4564 pid,
4565 username,
4566 cmd);
4567
4568 /* This only collects core numbers, and does not print threads. */
4569 list_threads (pid, NULL, &cores);
4570
4571 if (cores)
4572 {
4573 buffer_xml_printf (buffer,
4574 "<column name=\"cores\">%s</column>", cores);
4575 free (cores);
4576 }
4577
4578 buffer_xml_printf (buffer, "</item>");
4579 }
4580 fclose (f);
4581 }
4582}
4583
07e059b5
VP
4584static int
4585linux_qxfer_osdata (const char *annex,
1b3f6016
PA
4586 unsigned char *readbuf, unsigned const char *writebuf,
4587 CORE_ADDR offset, int len)
07e059b5
VP
4588{
4589 /* We make the process list snapshot when the object starts to be
4590 read. */
4591 static const char *buf;
4592 static long len_avail = -1;
4593 static struct buffer buffer;
dc146f7c
VP
4594 int processes = 0;
4595 int threads = 0;
07e059b5
VP
4596
4597 DIR *dirp;
4598
dc146f7c
VP
4599 if (strcmp (annex, "processes") == 0)
4600 processes = 1;
4601 else if (strcmp (annex, "threads") == 0)
4602 threads = 1;
4603 else
07e059b5
VP
4604 return 0;
4605
4606 if (!readbuf || writebuf)
4607 return 0;
4608
4609 if (offset == 0)
4610 {
4611 if (len_avail != -1 && len_avail != 0)
4612 buffer_free (&buffer);
4613 len_avail = 0;
4614 buf = NULL;
4615 buffer_init (&buffer);
dc146f7c
VP
4616 if (processes)
4617 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
4618 else if (threads)
4619 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
07e059b5
VP
4620
4621 dirp = opendir ("/proc");
4622 if (dirp)
4623 {
1b3f6016
PA
4624 struct dirent *dp;
4625 while ((dp = readdir (dirp)) != NULL)
4626 {
4627 struct stat statbuf;
4628 char procentry[sizeof ("/proc/4294967295")];
4629
4630 if (!isdigit (dp->d_name[0])
4631 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
4632 continue;
4633
4634 sprintf (procentry, "/proc/%s", dp->d_name);
4635 if (stat (procentry, &statbuf) == 0
4636 && S_ISDIR (statbuf.st_mode))
4637 {
dc146f7c 4638 int pid = (int) strtoul (dp->d_name, NULL, 10);
1b3f6016 4639
dc146f7c 4640 if (processes)
1b3f6016 4641 {
dc146f7c
VP
4642 struct passwd *entry = getpwuid (statbuf.st_uid);
4643 show_process (pid, entry ? entry->pw_name : "?", &buffer);
4644 }
4645 else if (threads)
4646 {
4647 list_threads (pid, &buffer, NULL);
1b3f6016
PA
4648 }
4649 }
4650 }
07e059b5 4651
1b3f6016 4652 closedir (dirp);
07e059b5
VP
4653 }
4654 buffer_grow_str0 (&buffer, "</osdata>\n");
4655 buf = buffer_finish (&buffer);
4656 len_avail = strlen (buf);
4657 }
4658
4659 if (offset >= len_avail)
4660 {
4661 /* Done. Get rid of the data. */
4662 buffer_free (&buffer);
4663 buf = NULL;
4664 len_avail = 0;
4665 return 0;
4666 }
4667
4668 if (len > len_avail - offset)
4669 len = len_avail - offset;
4670 memcpy (readbuf, buf + offset, len);
4671
4672 return len;
4673}
4674
d0722149
DE
4675/* Convert a native/host siginfo object, into/from the siginfo in the
4676 layout of the inferiors' architecture. */
4677
4678static void
4679siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4680{
4681 int done = 0;
4682
4683 if (the_low_target.siginfo_fixup != NULL)
4684 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4685
4686 /* If there was no callback, or the callback didn't do anything,
4687 then just do a straight memcpy. */
4688 if (!done)
4689 {
4690 if (direction == 1)
4691 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4692 else
4693 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4694 }
4695}
4696
4aa995e1
PA
4697static int
4698linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4699 unsigned const char *writebuf, CORE_ADDR offset, int len)
4700{
d0722149 4701 int pid;
4aa995e1 4702 struct siginfo siginfo;
d0722149 4703 char inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
4704
4705 if (current_inferior == NULL)
4706 return -1;
4707
bd99dc85 4708 pid = lwpid_of (get_thread_lwp (current_inferior));
4aa995e1
PA
4709
4710 if (debug_threads)
d0722149 4711 fprintf (stderr, "%s siginfo for lwp %d.\n",
4aa995e1
PA
4712 readbuf != NULL ? "Reading" : "Writing",
4713 pid);
4714
4715 if (offset > sizeof (siginfo))
4716 return -1;
4717
4718 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4719 return -1;
4720
d0722149
DE
4721 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4722 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4723 inferior with a 64-bit GDBSERVER should look the same as debugging it
4724 with a 32-bit GDBSERVER, we need to convert it. */
4725 siginfo_fixup (&siginfo, inf_siginfo, 0);
4726
4aa995e1
PA
4727 if (offset + len > sizeof (siginfo))
4728 len = sizeof (siginfo) - offset;
4729
4730 if (readbuf != NULL)
d0722149 4731 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4732 else
4733 {
d0722149
DE
4734 memcpy (inf_siginfo + offset, writebuf, len);
4735
4736 /* Convert back to ptrace layout before flushing it out. */
4737 siginfo_fixup (&siginfo, inf_siginfo, 1);
4738
4aa995e1
PA
4739 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4740 return -1;
4741 }
4742
4743 return len;
4744}
4745
bd99dc85
PA
4746/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4747 so we notice when children change state; as the handler for the
4748 sigsuspend in my_waitpid. */
4749
4750static void
4751sigchld_handler (int signo)
4752{
4753 int old_errno = errno;
4754
4755 if (debug_threads)
e581f2b4
PA
4756 {
4757 do
4758 {
4759 /* fprintf is not async-signal-safe, so call write
4760 directly. */
4761 if (write (2, "sigchld_handler\n",
4762 sizeof ("sigchld_handler\n") - 1) < 0)
4763 break; /* just ignore */
4764 } while (0);
4765 }
bd99dc85
PA
4766
4767 if (target_is_async_p ())
4768 async_file_mark (); /* trigger a linux_wait */
4769
4770 errno = old_errno;
4771}
4772
4773static int
4774linux_supports_non_stop (void)
4775{
4776 return 1;
4777}
4778
4779static int
4780linux_async (int enable)
4781{
4782 int previous = (linux_event_pipe[0] != -1);
4783
8336d594
PA
4784 if (debug_threads)
4785 fprintf (stderr, "linux_async (%d), previous=%d\n",
4786 enable, previous);
4787
bd99dc85
PA
4788 if (previous != enable)
4789 {
4790 sigset_t mask;
4791 sigemptyset (&mask);
4792 sigaddset (&mask, SIGCHLD);
4793
4794 sigprocmask (SIG_BLOCK, &mask, NULL);
4795
4796 if (enable)
4797 {
4798 if (pipe (linux_event_pipe) == -1)
4799 fatal ("creating event pipe failed.");
4800
4801 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4802 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4803
4804 /* Register the event loop handler. */
4805 add_file_handler (linux_event_pipe[0],
4806 handle_target_event, NULL);
4807
4808 /* Always trigger a linux_wait. */
4809 async_file_mark ();
4810 }
4811 else
4812 {
4813 delete_file_handler (linux_event_pipe[0]);
4814
4815 close (linux_event_pipe[0]);
4816 close (linux_event_pipe[1]);
4817 linux_event_pipe[0] = -1;
4818 linux_event_pipe[1] = -1;
4819 }
4820
4821 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4822 }
4823
4824 return previous;
4825}
4826
4827static int
4828linux_start_non_stop (int nonstop)
4829{
4830 /* Register or unregister from event-loop accordingly. */
4831 linux_async (nonstop);
4832 return 0;
4833}
4834
cf8fd78b
PA
4835static int
4836linux_supports_multi_process (void)
4837{
4838 return 1;
4839}
4840
efcbbd14
UW
4841
4842/* Enumerate spufs IDs for process PID. */
4843static int
4844spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4845{
4846 int pos = 0;
4847 int written = 0;
4848 char path[128];
4849 DIR *dir;
4850 struct dirent *entry;
4851
4852 sprintf (path, "/proc/%ld/fd", pid);
4853 dir = opendir (path);
4854 if (!dir)
4855 return -1;
4856
4857 rewinddir (dir);
4858 while ((entry = readdir (dir)) != NULL)
4859 {
4860 struct stat st;
4861 struct statfs stfs;
4862 int fd;
4863
4864 fd = atoi (entry->d_name);
4865 if (!fd)
4866 continue;
4867
4868 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4869 if (stat (path, &st) != 0)
4870 continue;
4871 if (!S_ISDIR (st.st_mode))
4872 continue;
4873
4874 if (statfs (path, &stfs) != 0)
4875 continue;
4876 if (stfs.f_type != SPUFS_MAGIC)
4877 continue;
4878
4879 if (pos >= offset && pos + 4 <= offset + len)
4880 {
4881 *(unsigned int *)(buf + pos - offset) = fd;
4882 written += 4;
4883 }
4884 pos += 4;
4885 }
4886
4887 closedir (dir);
4888 return written;
4889}
4890
4891/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4892 object type, using the /proc file system. */
4893static int
4894linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4895 unsigned const char *writebuf,
4896 CORE_ADDR offset, int len)
4897{
4898 long pid = lwpid_of (get_thread_lwp (current_inferior));
4899 char buf[128];
4900 int fd = 0;
4901 int ret = 0;
4902
4903 if (!writebuf && !readbuf)
4904 return -1;
4905
4906 if (!*annex)
4907 {
4908 if (!readbuf)
4909 return -1;
4910 else
4911 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4912 }
4913
4914 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4915 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4916 if (fd <= 0)
4917 return -1;
4918
4919 if (offset != 0
4920 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4921 {
4922 close (fd);
4923 return 0;
4924 }
4925
4926 if (writebuf)
4927 ret = write (fd, writebuf, (size_t) len);
4928 else
4929 ret = read (fd, readbuf, (size_t) len);
4930
4931 close (fd);
4932 return ret;
4933}
4934
dc146f7c
VP
4935static int
4936linux_core_of_thread (ptid_t ptid)
4937{
4938 char filename[sizeof ("/proc//task//stat")
4939 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4940 + 1];
4941 FILE *f;
4942 char *content = NULL;
4943 char *p;
4944 char *ts = 0;
4945 int content_read = 0;
4946 int i;
4947 int core;
4948
4949 sprintf (filename, "/proc/%d/task/%ld/stat",
4950 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4951 f = fopen (filename, "r");
4952 if (!f)
4953 return -1;
4954
4955 for (;;)
4956 {
4957 int n;
4958 content = realloc (content, content_read + 1024);
4959 n = fread (content + content_read, 1, 1024, f);
4960 content_read += n;
4961 if (n < 1024)
4962 {
4963 content[content_read] = '\0';
4964 break;
4965 }
4966 }
4967
4968 p = strchr (content, '(');
dc146f7c 4969
ca2a87a0
JK
4970 /* Skip ")". */
4971 if (p != NULL)
4972 p = strchr (p, ')');
4973 if (p != NULL)
4974 p++;
4975
4976 /* If the first field after program name has index 0, then core number is
4977 the field with index 36. There's no constant for that anywhere. */
4978 if (p != NULL)
4979 p = strtok_r (p, " ", &ts);
4980 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
4981 p = strtok_r (NULL, " ", &ts);
4982
ca2a87a0 4983 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
4984 core = -1;
4985
4986 free (content);
4987 fclose (f);
4988
4989 return core;
4990}
4991
1570b33e
L
4992static void
4993linux_process_qsupported (const char *query)
4994{
4995 if (the_low_target.process_qsupported != NULL)
4996 the_low_target.process_qsupported (query);
4997}
4998
219f2f23
PA
4999static int
5000linux_supports_tracepoints (void)
5001{
5002 if (*the_low_target.supports_tracepoints == NULL)
5003 return 0;
5004
5005 return (*the_low_target.supports_tracepoints) ();
5006}
5007
5008static CORE_ADDR
5009linux_read_pc (struct regcache *regcache)
5010{
5011 if (the_low_target.get_pc == NULL)
5012 return 0;
5013
5014 return (*the_low_target.get_pc) (regcache);
5015}
5016
5017static void
5018linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5019{
5020 gdb_assert (the_low_target.set_pc != NULL);
5021
5022 (*the_low_target.set_pc) (regcache, pc);
5023}
5024
8336d594
PA
5025static int
5026linux_thread_stopped (struct thread_info *thread)
5027{
5028 return get_thread_lwp (thread)->stopped;
5029}
5030
5031/* This exposes stop-all-threads functionality to other modules. */
5032
5033static void
7984d532 5034linux_pause_all (int freeze)
8336d594 5035{
7984d532
PA
5036 stop_all_lwps (freeze, NULL);
5037}
5038
5039/* This exposes unstop-all-threads functionality to other gdbserver
5040 modules. */
5041
5042static void
5043linux_unpause_all (int unfreeze)
5044{
5045 unstop_all_lwps (unfreeze, NULL);
8336d594
PA
5046}
5047
90d74c30
PA
5048static int
5049linux_prepare_to_access_memory (void)
5050{
5051 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5052 running LWP. */
5053 if (non_stop)
5054 linux_pause_all (1);
5055 return 0;
5056}
5057
5058static void
0146f85b 5059linux_done_accessing_memory (void)
90d74c30
PA
5060{
5061 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5062 running LWP. */
5063 if (non_stop)
5064 linux_unpause_all (1);
5065}
5066
fa593d66
PA
5067static int
5068linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5069 CORE_ADDR collector,
5070 CORE_ADDR lockaddr,
5071 ULONGEST orig_size,
5072 CORE_ADDR *jump_entry,
5073 unsigned char *jjump_pad_insn,
5074 ULONGEST *jjump_pad_insn_size,
5075 CORE_ADDR *adjusted_insn_addr,
5076 CORE_ADDR *adjusted_insn_addr_end)
5077{
5078 return (*the_low_target.install_fast_tracepoint_jump_pad)
5079 (tpoint, tpaddr, collector, lockaddr, orig_size,
5080 jump_entry, jjump_pad_insn, jjump_pad_insn_size,
5081 adjusted_insn_addr, adjusted_insn_addr_end);
5082}
5083
6a271cae
PA
5084static struct emit_ops *
5085linux_emit_ops (void)
5086{
5087 if (the_low_target.emit_ops != NULL)
5088 return (*the_low_target.emit_ops) ();
5089 else
5090 return NULL;
5091}
5092
ce3a066d
DJ
5093static struct target_ops linux_target_ops = {
5094 linux_create_inferior,
5095 linux_attach,
5096 linux_kill,
6ad8ae5c 5097 linux_detach,
8336d594 5098 linux_mourn,
444d6139 5099 linux_join,
ce3a066d
DJ
5100 linux_thread_alive,
5101 linux_resume,
5102 linux_wait,
5103 linux_fetch_registers,
5104 linux_store_registers,
90d74c30 5105 linux_prepare_to_access_memory,
0146f85b 5106 linux_done_accessing_memory,
ce3a066d
DJ
5107 linux_read_memory,
5108 linux_write_memory,
2f2893d9 5109 linux_look_up_symbols,
ef57601b 5110 linux_request_interrupt,
aa691b87 5111 linux_read_auxv,
d993e290
PA
5112 linux_insert_point,
5113 linux_remove_point,
e013ee27
OF
5114 linux_stopped_by_watchpoint,
5115 linux_stopped_data_address,
42c81e2a 5116#if defined(__UCLIBC__) && defined(HAS_NOMMU)
52fb6437 5117 linux_read_offsets,
dae5f5cf
DJ
5118#else
5119 NULL,
5120#endif
5121#ifdef USE_THREAD_DB
5122 thread_db_get_tls_address,
5123#else
5124 NULL,
52fb6437 5125#endif
efcbbd14 5126 linux_qxfer_spu,
59a016f0 5127 hostio_last_error_from_errno,
07e059b5 5128 linux_qxfer_osdata,
4aa995e1 5129 linux_xfer_siginfo,
bd99dc85
PA
5130 linux_supports_non_stop,
5131 linux_async,
5132 linux_start_non_stop,
cdbfd419
PP
5133 linux_supports_multi_process,
5134#ifdef USE_THREAD_DB
dc146f7c 5135 thread_db_handle_monitor_command,
cdbfd419 5136#else
dc146f7c 5137 NULL,
cdbfd419 5138#endif
1570b33e 5139 linux_core_of_thread,
219f2f23
PA
5140 linux_process_qsupported,
5141 linux_supports_tracepoints,
5142 linux_read_pc,
8336d594
PA
5143 linux_write_pc,
5144 linux_thread_stopped,
7984d532 5145 NULL,
711e434b 5146 linux_pause_all,
7984d532 5147 linux_unpause_all,
fa593d66
PA
5148 linux_cancel_breakpoints,
5149 linux_stabilize_threads,
6a271cae
PA
5150 linux_install_fast_tracepoint_jump_pad,
5151 linux_emit_ops
ce3a066d
DJ
5152};
5153
0d62e5e8
DJ
5154static void
5155linux_init_signals ()
5156{
5157 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5158 to find what the cancel signal actually is. */
1a981360 5159#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
254787d4 5160 signal (__SIGRTMIN+1, SIG_IGN);
60c3d7b0 5161#endif
0d62e5e8
DJ
5162}
5163
da6d8c04
DJ
5164void
5165initialize_low (void)
5166{
bd99dc85
PA
5167 struct sigaction sigchld_action;
5168 memset (&sigchld_action, 0, sizeof (sigchld_action));
ce3a066d 5169 set_target_ops (&linux_target_ops);
611cb4a5
DJ
5170 set_breakpoint_data (the_low_target.breakpoint,
5171 the_low_target.breakpoint_len);
0d62e5e8 5172 linux_init_signals ();
24a09b5f 5173 linux_test_for_tracefork ();
52fa2412
UW
5174#ifdef HAVE_LINUX_REGSETS
5175 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5176 ;
bca929d3 5177 disabled_regsets = xmalloc (num_regsets);
52fa2412 5178#endif
bd99dc85
PA
5179
5180 sigchld_action.sa_handler = sigchld_handler;
5181 sigemptyset (&sigchld_action.sa_mask);
5182 sigchld_action.sa_flags = SA_RESTART;
5183 sigaction (SIGCHLD, &sigchld_action, NULL);
da6d8c04 5184}
This page took 1.130801 seconds and 4 git commands to generate.