[ARM] Fix 32-bit host build failure.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <stdio.h>
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
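
/* For reference: with this encoding, W_STOPCODE (SIGTRAP) == 0x057f,
   a status for which WIFSTOPPED () is true and WSTOPSIG () == SIGTRAP.
   dequeue_one_deferred_signal below relies on this to rebuild a wait
   status for a deferred signal.  */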

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
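
/* This list cooperates with the clone handling in handle_extended_wait
   and linux_low_filter_event below: a stop reported for an LWP we don't
   know yet is stashed with add_to_pid_list, and claimed again with
   pull_pid_from_list once the PTRACE_EVENT_CLONE naming it arrives.  */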

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}
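
/* For instance, the x86 ports can leave breakpoint_reinsert_addr NULL
   and step with PTRACE_SINGLESTEP, while ports without hardware
   single-step (ARM, for one) supply the callback so a software
   single-step breakpoint can be planted at the reinsert address.  */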

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}
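
/* Note the range is half-open: a PC equal to step_range_end is already
   outside the range, matching how GDB's range-stepping (vCont;r)
   encodes [start, end).  */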

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
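
/* (The pipe is created only once GDB asks for async mode; the SIGCHLD
   handler writes a byte to it to wake the event loop, so testing the
   read end is an accurate "are we async" check.)  */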

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
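
/* (Note the asymmetry: a well-formed 32-bit ELF header yields 0, while
   a buffer that is not ELF at all yields -1 with *MACHINE set to
   EM_NONE, mirroring elf_64_file_p's contract below.)  */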

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
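
/* (This is how gdbserver decides at attach time whether the inferior
   wants the 32-bit or 64-bit register layout; the x86 arch-setup code
   is one consumer, e.g. for a 32-bit inferior under a 64-bit server.)  */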

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
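
/* (Extended events arrive as a SIGTRAP stop with the event code stored
   above bit 16 of the wait status: with PTRACE_O_TRACECLONE set, a
   clone reports W_STOPCODE (SIGTRAP) | (PTRACE_EVENT_CLONE << 16),
   which is why "wstat >> 16" above recovers the event.)  */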

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
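
/* (Concretely: on x86 the one-byte int3 breakpoint instruction leaves
   $eip pointing just past it, so decr_pc_after_break is 1 there.  The
   adjustment is skipped for single-steps, watchpoint stops and
   extended-event SIGTRAPs, none of which come from executing a
   breakpoint instruction.)  */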

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
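
/* (The personality(2) dance above works because ADDR_NO_RANDOMIZE is
   inherited across fork and exec: it is flipped on in gdbserver just
   long enough to create the child and restored right after, so only
   the inferior runs with randomization disabled.)  */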

char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);

  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
				strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
				strerror (err), err);
  xfree (warnings);
  return reason_string;
}

/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  int new_threads_found;
	  int iterations = 0;

	  while (iterations < 2)
	    {
	      struct dirent *dp;

	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  unsigned long lwp;
		  ptid_t ptid;

		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  ptid = ptid_build (pid, lwp, 0);

		  /* Is this a new thread?  */
		  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
		    {
		      int err;

		      if (debug_threads)
			debug_printf ("Found new lwp %ld\n", lwp);

		      err = linux_attach_lwp (ptid);
		      if (err != 0)
			warning ("Cannot attach to lwp %ld: %s",
				 lwp,
				 linux_attach_fail_reason_string (ptid, err));

		      new_threads_found++;
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    debug_printf ("LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (thr)),
		  errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		  target_pid_to_str (ptid_of (thr)),
		  errno ? strerror (errno) : "OK");
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
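
/* (Example: if the thread last stopped for a SIGSEGV that GDB's
   "handle" settings let pass, the value returned here is handed to
   PTRACE_DETACH in linux_detach_one_lwp below, so the signal is
   re-delivered on detach rather than silently lost.)  */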

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
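
/* (This matters for programs whose main thread calls pthread_exit
   while siblings keep running: the leader lingers as a zombie that
   waitpid cannot reap, and without this sweep gdbserver would keep
   waiting on it forever.)  */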

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_inferior));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_inferior));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_inferior, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_inferior));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
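
/* (Since the list is pushed at the head but drained from the tail,
   deferred signals are re-reported in FIFO order; W_STOPCODE rebuilds
   a wait status and PTRACE_SETSIGINFO restores the original siginfo,
   so the re-delivery looks just like the original stop.)  */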
1690
d50171e4
PA
1691/* Arrange for a breakpoint to be hit again later. We don't keep the
1692 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1693 will handle the current event, eventually we will resume this LWP,
1694 and this breakpoint will trap again. */
1695
1696static int
1697cancel_breakpoint (struct lwp_info *lwp)
1698{
1699 struct thread_info *saved_inferior;
d50171e4
PA
1700
1701 /* There's nothing to do if we don't support breakpoints. */
1702 if (!supports_breakpoints ())
1703 return 0;
1704
d50171e4
PA
1705 /* breakpoint_at reads from current inferior. */
1706 saved_inferior = current_inferior;
1707 current_inferior = get_lwp_thread (lwp);
1708
1709 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1710 {
1711 if (debug_threads)
87ce2a04 1712 debug_printf ("CB: Push back breakpoint for %s\n",
d86d4aaf 1713 target_pid_to_str (ptid_of (current_inferior)));
d50171e4
PA
1714
1715 /* Back up the PC if necessary. */
1716 if (the_low_target.decr_pc_after_break)
1717 {
1718 struct regcache *regcache
fc7238bb 1719 = get_thread_regcache (current_inferior, 1);
d50171e4
PA
1720 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1721 }
1722
1723 current_inferior = saved_inferior;
1724 return 1;
1725 }
1726 else
1727 {
1728 if (debug_threads)
87ce2a04
DE
1729 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1730 paddress (lwp->stop_pc),
d86d4aaf 1731 target_pid_to_str (ptid_of (current_inferior)));
d50171e4
PA
1732 }
1733
1734 current_inferior = saved_inferior;
1735 return 0;
1736}
1737
1738/* Do low-level handling of the event, and check if we should go on
 1739 and pass it to caller code. Return the affected lwp if we should, or
1740 NULL otherwise. */
1741
1742static struct lwp_info *
1743linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1744{
1745 struct lwp_info *child;
1746 struct thread_info *thread;
1747
1748 child = find_lwp_pid (pid_to_ptid (lwpid));
1749
1750 /* If we didn't find a process, one of two things presumably happened:
1751 - A process we started and then detached from has exited. Ignore it.
1752 - A process we are controlling has forked and the new child's stop
1753 was reported to us by the kernel. Save its PID. */
1754 if (child == NULL && WIFSTOPPED (wstat))
1755 {
1756 add_to_pid_list (&stopped_pids, lwpid, wstat);
1757 return NULL;
1758 }
1759 else if (child == NULL)
1760 return NULL;
1761
1762 thread = get_lwp_thread (child);
1763
1764 child->stopped = 1;
1765
1766 child->last_status = wstat;
1767
1768 if (WIFSTOPPED (wstat))
1769 {
1770 struct process_info *proc;
1771
1772 /* Architecture-specific setup after inferior is running. This
1773 needs to happen after we have attached to the inferior and it
1774 is stopped for the first time, but before we access any
1775 inferior registers. */
1776 proc = find_process_pid (pid_of (thread));
1777 if (proc->private->new_inferior)
1778 {
1779 struct thread_info *saved_inferior;
1780
1781 saved_inferior = current_inferior;
1782 current_inferior = thread;
1783
1784 the_low_target.arch_setup ();
1785
1786 current_inferior = saved_inferior;
1787
1788 proc->private->new_inferior = 0;
1789 }
1790 }
1791
1792 /* Store the STOP_PC, with adjustment applied. This depends on the
1793 architecture being defined already (so that CHILD has a valid
1794 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1795 not). */
1796 if (WIFSTOPPED (wstat))
1797 {
1798 if (debug_threads
1799 && the_low_target.get_pc != NULL)
1800 {
1801 struct thread_info *saved_inferior;
1802 struct regcache *regcache;
1803 CORE_ADDR pc;
1804
1805 saved_inferior = current_inferior;
1806 current_inferior = thread;
1807 regcache = get_thread_regcache (current_inferior, 1);
1808 pc = (*the_low_target.get_pc) (regcache);
1809 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
1810 current_inferior = saved_inferior;
1811 }
1812
1813 child->stop_pc = get_stop_pc (child);
1814 }
1815
1816 /* Fetch the possibly triggered data watchpoint info and store it in
1817 CHILD.
1818
1819 On some archs, like x86, that use debug registers to set
1820 watchpoints, it's possible that the way to know which watched
1821 address trapped, is to check the register that is used to select
1822 which address to watch. Problem is, between setting the
1823 watchpoint and reading back which data address trapped, the user
1824 may change the set of watchpoints, and, as a consequence, GDB
1825 changes the debug registers in the inferior. To avoid reading
1826 back a stale stopped-data-address when that happens, we cache in
1827 LP the fact that a watchpoint trapped, and the corresponding data
1828 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1829 changes the debug registers meanwhile, we have the cached data we
1830 can rely on. */
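  /* (That is, CHILD's stopped_by_watchpoint/stopped_data_address
     below are a snapshot taken at SIGTRAP time; later queries about
     this stop can presumably be answered from this cache instead of
     re-reading the debug registers.)  */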
1831
1832 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1833 {
1834 if (the_low_target.stopped_by_watchpoint == NULL)
1835 {
1836 child->stopped_by_watchpoint = 0;
1837 }
1838 else
1839 {
1840 struct thread_info *saved_inferior;
1841
1842 saved_inferior = current_inferior;
1843 current_inferior = thread;
1844
1845 child->stopped_by_watchpoint
1846 = the_low_target.stopped_by_watchpoint ();
1847
1848 if (child->stopped_by_watchpoint)
1849 {
1850 if (the_low_target.stopped_data_address != NULL)
1851 child->stopped_data_address
1852 = the_low_target.stopped_data_address ();
1853 else
1854 child->stopped_data_address = 0;
1855 }
1856
1857 current_inferior = saved_inferior;
1858 }
1859 }
1860
1861 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1862 {
1863 linux_enable_event_reporting (lwpid);
1864 child->must_set_ptrace_flags = 0;
1865 }
1866
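  /* Extended ptrace events (fork, vfork, clone, exec, ...) are
     reported by the kernel as a SIGTRAP stop with the PTRACE_EVENT_*
     code stored in bits 16 and up of the wait status, which is what
     the >> 16 test below picks out.  */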
1867 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1868 && wstat >> 16 != 0)
1869 {
1870 handle_extended_wait (child, wstat);
1871 return NULL;
1872 }
1873
1874 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1875 && child->stop_expected)
1876 {
1877 if (debug_threads)
1878 debug_printf ("Expected stop.\n");
1879 child->stop_expected = 0;
1880
1881 if (thread->last_resume_kind == resume_stop)
1882 {
1883 /* We want to report the stop to the core. Treat the
1884 SIGSTOP as a normal event. */
1885 }
1886 else if (stopping_threads != NOT_STOPPING_THREADS)
1887 {
1888 /* Stopping threads. We don't want this SIGSTOP to end up
1889 pending in the FILTER_PTID handling below. */
1890 return NULL;
1891 }
1892 else
1893 {
1894 /* Filter out the event. */
1895 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1896 return NULL;
1897 }
1898 }
1899
1900 /* Check if the thread has exited. */
1901 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1902 && num_lwps (pid_of (thread)) > 1)
1903 {
1904 if (debug_threads)
1905 debug_printf ("LLW: %d exited.\n", lwpid);
1906
1907 /* If there is at least one more LWP, then the exit signal
1908 was not the end of the debugged application and should be
1909 ignored. */
1910 delete_lwp (child);
1911 return NULL;
1912 }
1913
1914 if (!ptid_match (ptid_of (thread), filter_ptid))
1915 {
1916 if (debug_threads)
1917 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1918 lwpid, wstat);
1919
1920 if (WIFSTOPPED (wstat))
1921 {
1922 child->status_pending_p = 1;
1923 child->status_pending = wstat;
1924
1925 if (WSTOPSIG (wstat) != SIGSTOP)
1926 {
1927 /* Cancel breakpoint hits. The breakpoint may be
1928 removed before we fetch events from this process to
1929 report to the core. It is best not to assume the
1930 moribund breakpoints heuristic always handles these
1931 cases --- it could be too many events go through to
1932 the core before this one is handled. All-stop always
1933 cancels breakpoint hits in all threads. */
1934 if (non_stop
1935 && WSTOPSIG (wstat) == SIGTRAP
1936 && cancel_breakpoint (child))
1937 {
1938 /* Throw away the SIGTRAP. */
1939 child->status_pending_p = 0;
1940
1941 if (debug_threads)
1942 debug_printf ("LLW: LWP %d hit a breakpoint while"
1943 " waiting for another process;"
1944 " cancelled it\n", lwpid);
1945 }
1946 }
1947 }
1948 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1949 {
1950 if (debug_threads)
1951 debug_printf ("LLWE: process %d exited while fetching "
1952 "event from another LWP\n", lwpid);
1953
 1954	  /* This was the last lwp in the process.  Events are
 1955	     serialized to the GDB core, so we can't report this one
 1956	     right now; but since the core and the other target layers
 1957	     will want to be notified about the exit code/signal, leave
 1958	     the status pending for the next time we're able to
 1959	     report it.  */
1960 mark_lwp_dead (child, wstat);
1961 }
1962
1963 return NULL;
1964 }
1965
1966 return child;
1967}
1968
1969/* When the event-loop is doing a step-over, this points at the thread
1970 being stepped. */
1971ptid_t step_over_bkpt;
1972
1973/* Wait for an event from child(ren) WAIT_PTID, and return any that
1974 match FILTER_PTID (leaving others pending). The PTIDs can be:
1975 minus_one_ptid, to specify any child; a pid PTID, specifying all
1976 lwps of a thread group; or a PTID representing a single lwp. Store
1977 the stop status through the status pointer WSTAT. OPTIONS is
1978 passed to the waitpid call. Return 0 if no event was found and
1979 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
 1980 were found. Return the PID of the stopped child otherwise. */
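/* For example (ptid fields being { pid, lwp, tid }): minus_one_ptid
   is { -1, 0, 0 } and matches any child; { 1234, 0, 0 } matches every
   LWP of process 1234; and { 1234, 1235, 0 } matches only LWP 1235 of
   that process.  */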
bd99dc85 1981
0d62e5e8 1982static int
1983linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1984 int *wstatp, int options)
0d62e5e8 1985{
d86d4aaf 1986 struct thread_info *event_thread;
d50171e4 1987 struct lwp_info *event_child, *requested_child;
fa96cb38 1988 sigset_t block_mask, prev_mask;
d50171e4 1989
fa96cb38 1990 retry:
1991 /* N.B. event_thread points to the thread_info struct that contains
1992 event_child. Keep them in sync. */
1993 event_thread = NULL;
1994 event_child = NULL;
1995 requested_child = NULL;
0d62e5e8 1996
95954743 1997 /* Check for a lwp with a pending status. */
bd99dc85 1998
fa96cb38 1999 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
0d62e5e8 2000 {
d86d4aaf 2001 event_thread = (struct thread_info *)
fa96cb38 2002 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2003 if (event_thread != NULL)
2004 event_child = get_thread_lwp (event_thread);
2005 if (debug_threads && event_thread)
2006 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2007 }
fa96cb38 2008 else if (!ptid_equal (filter_ptid, null_ptid))
0d62e5e8 2009 {
fa96cb38 2010 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2011
bde24c0a 2012 if (stopping_threads == NOT_STOPPING_THREADS
2013 && requested_child->status_pending_p
2014 && requested_child->collecting_fast_tracepoint)
2015 {
2016 enqueue_one_deferred_signal (requested_child,
2017 &requested_child->status_pending);
2018 requested_child->status_pending_p = 0;
2019 requested_child->status_pending = 0;
2020 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2021 }
2022
2023 if (requested_child->suspended
2024 && requested_child->status_pending_p)
2025 fatal ("requesting an event out of a suspended child?");
2026
d50171e4 2027 if (requested_child->status_pending_p)
2028 {
2029 event_child = requested_child;
2030 event_thread = get_lwp_thread (event_child);
2031 }
0d62e5e8 2032 }
611cb4a5 2033
2034 if (event_child != NULL)
2035 {
bd99dc85 2036 if (debug_threads)
87ce2a04 2037 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2038 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2039 *wstatp = event_child->status_pending;
2040 event_child->status_pending_p = 0;
2041 event_child->status_pending = 0;
2042 current_inferior = event_thread;
2043 return lwpid_of (event_thread);
2044 }
2045
2046 /* But if we don't find a pending event, we'll have to wait.
2047
2048 We only enter this loop if no process has a pending wait status.
2049 Thus any action taken in response to a wait status inside this
2050 loop is responding as soon as we detect the status, not after any
2051 pending events. */
d8301ad1 2052
2053 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2054 all signals while here. */
2055 sigfillset (&block_mask);
2056 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
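  /* Without this, a child could stop after the WNOHANG waitpid below
     returned 0 but before the sigsuspend, and the SIGCHLD would be
     consumed with nobody waiting for it; sigsuspend would then block
     on a wakeup that already happened.  Blocking SIGCHLD here and
     atomically unblocking it inside sigsuspend closes that race.  */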
2057
2058 while (event_child == NULL)
0d62e5e8 2059 {
fa96cb38 2060 pid_t ret = 0;
0d62e5e8 2061
 2062      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2063 quirks:
0d62e5e8 2064
2065 - If the thread group leader exits while other threads in the
2066 thread group still exist, waitpid(TGID, ...) hangs. That
2067 waitpid won't return an exit status until the other threads
2068 in the group are reaped.
611cb4a5 2069
2070 - When a non-leader thread execs, that thread just vanishes
2071 without reporting an exit (so we'd hang if we waited for it
2072 explicitly in that case). The exec event is reported to
2073 the TGID pid (although we don't currently enable exec
2074 events). */
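      /* Concretely: with a two-thread inferior whose leader has
	 exited, waitpid (TGID) would block until the second thread
	 was reaped too; and if that second thread execs instead, it
	 takes over the leader's PID and its old LWP id simply
	 disappears.  waitpid (-1, ..., WNOHANG) avoids hanging in
	 either case.  */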
2075 errno = 0;
2076 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2077
2078 if (debug_threads)
2079 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2080 ret, errno ? strerror (errno) : "ERRNO-OK");
0d62e5e8 2081
fa96cb38 2082 if (ret > 0)
0d62e5e8 2083 {
89be2091 2084 if (debug_threads)
bd99dc85 2085 {
2086 debug_printf ("LLW: waitpid %ld received %s\n",
2087 (long) ret, status_to_str (*wstatp));
bd99dc85 2088 }
89be2091 2089
2090 event_child = linux_low_filter_event (filter_ptid,
2091 ret, *wstatp);
2092 if (event_child != NULL)
bd99dc85 2093 {
2094 /* We got an event to report to the core. */
2095 event_thread = get_lwp_thread (event_child);
2096 break;
bd99dc85 2097 }
89be2091 2098
2099 /* Retry until nothing comes out of waitpid. A single
2100 SIGCHLD can indicate more than one child stopped. */
2101 continue;
2102 }
2103
2104 /* Check for zombie thread group leaders. Those can't be reaped
2105 until all other threads in the thread group are. */
2106 check_zombie_leaders ();
2107
2108 /* If there are no resumed children left in the set of LWPs we
2109 want to wait for, bail. We can't just block in
2110 waitpid/sigsuspend, because lwps might have been left stopped
2111 in trace-stop state, and we'd be stuck forever waiting for
2112 their status to change (which would only happen if we resumed
2113 them). Even if WNOHANG is set, this return code is preferred
2114 over 0 (below), as it is more detailed. */
2115 if ((find_inferior (&all_threads,
2116 not_stopped_callback,
2117 &wait_ptid) == NULL))
a6dbe5df 2118 {
2119 if (debug_threads)
2120 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2121 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2122 return -1;
2123 }
2124
2125 /* No interesting event to report to the caller. */
2126 if ((options & WNOHANG))
24a09b5f 2127 {
2128 if (debug_threads)
2129 debug_printf ("WNOHANG set, no event found\n");
2130
2131 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2132 return 0;
2133 }
2134
2135 /* Block until we get an event reported with SIGCHLD. */
2136 if (debug_threads)
2137 debug_printf ("sigsuspend'ing\n");
d50171e4 2138
2139 sigsuspend (&prev_mask);
2140 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2141 goto retry;
2142 }
d50171e4 2143
fa96cb38 2144 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2145
fa96cb38 2146 current_inferior = event_thread;
d50171e4 2147
2148 /* Check for thread exit. */
2149 if (! WIFSTOPPED (*wstatp))
2150 {
2151 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2152
2153 if (debug_threads)
2154 debug_printf ("LWP %d is the last lwp of process. "
2155 "Process %ld exiting.\n",
2156 pid_of (event_thread), lwpid_of (event_thread));
d86d4aaf 2157 return lwpid_of (event_thread);
611cb4a5 2158 }
0d62e5e8 2159
2160 return lwpid_of (event_thread);
2161}
2162
2163/* Wait for an event from child(ren) PTID. PTIDs can be:
2164 minus_one_ptid, to specify any child; a pid PTID, specifying all
2165 lwps of a thread group; or a PTID representing a single lwp. Store
2166 the stop status through the status pointer WSTAT. OPTIONS is
2167 passed to the waitpid call. Return 0 if no event was found and
2168 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
 2169 were found. Return the PID of the stopped child otherwise. */
2170
2171static int
2172linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2173{
2174 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2175}
2176
 2177/* Count the LWPs that have had events. */
2178
2179static int
2180count_events_callback (struct inferior_list_entry *entry, void *data)
2181{
2182 struct thread_info *thread = (struct thread_info *) entry;
2183 struct lwp_info *lp = get_thread_lwp (thread);
2184 int *count = data;
2185
2186 gdb_assert (count != NULL);
2187
2188 /* Count only resumed LWPs that have a SIGTRAP event pending that
2189 should be reported to GDB. */
2190 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2191 && thread->last_resume_kind != resume_stop
2192 && lp->status_pending_p
2193 && WIFSTOPPED (lp->status_pending)
2194 && WSTOPSIG (lp->status_pending) == SIGTRAP
2195 && !breakpoint_inserted_here (lp->stop_pc))
2196 (*count)++;
2197
2198 return 0;
2199}
2200
2201/* Select the LWP (if any) that is currently being single-stepped. */
2202
2203static int
2204select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2205{
2206 struct thread_info *thread = (struct thread_info *) entry;
2207 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2208
2209 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2210 && thread->last_resume_kind == resume_step
2211 && lp->status_pending_p)
2212 return 1;
2213 else
2214 return 0;
2215}
2216
2217/* Select the Nth LWP that has had a SIGTRAP event that should be
2218 reported to GDB. */
2219
2220static int
2221select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2222{
2223 struct thread_info *thread = (struct thread_info *) entry;
2224 struct lwp_info *lp = get_thread_lwp (thread);
2225 int *selector = data;
2226
2227 gdb_assert (selector != NULL);
2228
2229 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2230 if (thread->last_resume_kind != resume_stop
2231 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2232 && lp->status_pending_p
2233 && WIFSTOPPED (lp->status_pending)
2234 && WSTOPSIG (lp->status_pending) == SIGTRAP
2235 && !breakpoint_inserted_here (lp->stop_pc))
2236 if ((*selector)-- == 0)
2237 return 1;
2238
2239 return 0;
2240}
2241
2242static int
2243cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2244{
2245 struct thread_info *thread = (struct thread_info *) entry;
2246 struct lwp_info *lp = get_thread_lwp (thread);
2247 struct lwp_info *event_lp = data;
2248
2249 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2250 if (lp == event_lp)
2251 return 0;
2252
2253 /* If a LWP other than the LWP that we're reporting an event for has
2254 hit a GDB breakpoint (as opposed to some random trap signal),
2255 then just arrange for it to hit it again later. We don't keep
2256 the SIGTRAP status and don't forward the SIGTRAP signal to the
2257 LWP. We will handle the current event, eventually we will resume
2258 all LWPs, and this one will get its breakpoint trap again.
2259
2260 If we do not do this, then we run the risk that the user will
2261 delete or disable the breakpoint, but the LWP will have already
2262 tripped on it. */
2263
2264 if (thread->last_resume_kind != resume_stop
2265 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2266 && lp->status_pending_p
2267 && WIFSTOPPED (lp->status_pending)
2268 && WSTOPSIG (lp->status_pending) == SIGTRAP
2269 && !lp->stepping
2270 && !lp->stopped_by_watchpoint
2271 && cancel_breakpoint (lp))
2272 /* Throw away the SIGTRAP. */
2273 lp->status_pending_p = 0;
2274
2275 return 0;
2276}
2277
2278static void
2279linux_cancel_breakpoints (void)
2280{
d86d4aaf 2281 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2282}
2283
2284/* Select one LWP out of those that have events pending. */
2285
2286static void
2287select_event_lwp (struct lwp_info **orig_lp)
2288{
2289 int num_events = 0;
2290 int random_selector;
d86d4aaf 2291 struct thread_info *event_thread;
2292
2293 /* Give preference to any LWP that is being single-stepped. */
2294 event_thread
2295 = (struct thread_info *) find_inferior (&all_threads,
2296 select_singlestep_lwp_callback,
2297 NULL);
2298 if (event_thread != NULL)
2299 {
2300 if (debug_threads)
87ce2a04 2301 debug_printf ("SEL: Select single-step %s\n",
d86d4aaf 2302 target_pid_to_str (ptid_of (event_thread)));
2303 }
2304 else
2305 {
2306 /* No single-stepping LWP. Select one at random, out of those
2307 which have had SIGTRAP events. */
2308
2309 /* First see how many SIGTRAP events we have. */
d86d4aaf 2310 find_inferior (&all_threads, count_events_callback, &num_events);
2311
2312 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2313 random_selector = (int)
2314 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
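      /* rand () / (RAND_MAX + 1.0) is uniformly distributed over
	 [0, 1), so RANDOM_SELECTOR is (modulo rand's own bias)
	 uniform over 0 .. NUM_EVENTS - 1; select_event_lwp_callback
	 below counts it down to pick the Nth pending SIGTRAP.  */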
2315
2316 if (debug_threads && num_events > 1)
2317 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2318 num_events, random_selector);
6bf5e0ba 2319
2320 event_thread
2321 = (struct thread_info *) find_inferior (&all_threads,
2322 select_event_lwp_callback,
2323 &random_selector);
2324 }
2325
d86d4aaf 2326 if (event_thread != NULL)
6bf5e0ba 2327 {
2328 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2329
2330 /* Switch the event LWP. */
2331 *orig_lp = event_lp;
2332 }
2333}
2334
2335/* Decrement the suspend count of an LWP. */
2336
2337static int
2338unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2339{
2340 struct thread_info *thread = (struct thread_info *) entry;
2341 struct lwp_info *lwp = get_thread_lwp (thread);
2342
2343 /* Ignore EXCEPT. */
2344 if (lwp == except)
2345 return 0;
2346
2347 lwp->suspended--;
2348
2349 gdb_assert (lwp->suspended >= 0);
2350 return 0;
2351}
2352
 2353/* Decrement the suspend count of all LWPs, except EXCEPT, if
 2354 non-NULL. */
2355
2356static void
2357unsuspend_all_lwps (struct lwp_info *except)
2358{
d86d4aaf 2359 find_inferior (&all_threads, unsuspend_one_lwp, except);
2360}
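/* Note: this is the counterpart of stop_all_lwps (1, EXCEPT), which
   bumps the suspend count of every LWP but EXCEPT.  Callers that only
   need threads stopped, not suspended, pair stop_all_lwps (0, ...)
   with unstop_all_lwps instead.  */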
2361
2362static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2363static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2364 void *data);
2365static int lwp_running (struct inferior_list_entry *entry, void *data);
2366static ptid_t linux_wait_1 (ptid_t ptid,
2367 struct target_waitstatus *ourstatus,
2368 int target_options);
2369
2370/* Stabilize threads (move out of jump pads).
2371
2372 If a thread is midway collecting a fast tracepoint, we need to
2373 finish the collection and move it out of the jump pad before
2374 reporting the signal.
2375
2376 This avoids recursion while collecting (when a signal arrives
2377 midway, and the signal handler itself collects), which would trash
2378 the trace buffer. In case the user set a breakpoint in a signal
 2379 handler, this avoids the backtrace showing the jump pad, etc.
2380 Most importantly, there are certain things we can't do safely if
2381 threads are stopped in a jump pad (or in its callee's). For
2382 example:
2383
2384 - starting a new trace run. A thread still collecting the
2385 previous run, could trash the trace buffer when resumed. The trace
2386 buffer control structures would have been reset but the thread had
 2387 no way to tell. The thread could even be midway through memcpy'ing to the
2388 buffer, which would mean that when resumed, it would clobber the
2389 trace buffer that had been set for a new run.
2390
2391 - we can't rewrite/reuse the jump pads for new tracepoints
2392 safely. Say you do tstart while a thread is stopped midway while
2393 collecting. When the thread is later resumed, it finishes the
2394 collection, and returns to the jump pad, to execute the original
2395 instruction that was under the tracepoint jump at the time the
2396 older run had been started. If the jump pad had been rewritten
2397 since for something else in the new run, the thread would now
2398 execute the wrong / random instructions. */
2399
2400static void
2401linux_stabilize_threads (void)
2402{
2403 struct thread_info *save_inferior;
d86d4aaf 2404 struct thread_info *thread_stuck;
fa593d66 2405
2406 thread_stuck
2407 = (struct thread_info *) find_inferior (&all_threads,
2408 stuck_in_jump_pad_callback,
2409 NULL);
2410 if (thread_stuck != NULL)
fa593d66 2411 {
b4d51a55 2412 if (debug_threads)
87ce2a04 2413 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
d86d4aaf 2414 lwpid_of (thread_stuck));
2415 return;
2416 }
2417
2418 save_inferior = current_inferior;
2419
2420 stabilizing_threads = 1;
2421
2422 /* Kick 'em all. */
d86d4aaf 2423 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2424
2425 /* Loop until all are stopped out of the jump pads. */
d86d4aaf 2426 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2427 {
2428 struct target_waitstatus ourstatus;
2429 struct lwp_info *lwp;
2430 int wstat;
2431
 2432      /* Note that we go through the full wait event loop.  While
2433 moving threads out of jump pad, we need to be able to step
2434 over internal breakpoints and such. */
32fcada3 2435 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2436
2437 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2438 {
2439 lwp = get_thread_lwp (current_inferior);
2440
2441 /* Lock it. */
2442 lwp->suspended++;
2443
a493e3e2 2444 if (ourstatus.value.sig != GDB_SIGNAL_0
2445 || current_inferior->last_resume_kind == resume_stop)
2446 {
2ea28649 2447 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2448 enqueue_one_deferred_signal (lwp, &wstat);
2449 }
2450 }
2451 }
2452
d86d4aaf 2453 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2454
2455 stabilizing_threads = 0;
2456
2457 current_inferior = save_inferior;
2458
b4d51a55 2459 if (debug_threads)
fa593d66 2460 {
2461 thread_stuck
2462 = (struct thread_info *) find_inferior (&all_threads,
2463 stuck_in_jump_pad_callback,
2464 NULL);
2465 if (thread_stuck != NULL)
87ce2a04 2466 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
d86d4aaf 2467 lwpid_of (thread_stuck));
2468 }
2469}
2470
0d62e5e8 2471/* Wait for process, returns status. */
da6d8c04 2472
2473static ptid_t
2474linux_wait_1 (ptid_t ptid,
2475 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2476{
e5f1222d 2477 int w;
fc7238bb 2478 struct lwp_info *event_child;
bd99dc85 2479 int options;
bd99dc85 2480 int pid;
2481 int step_over_finished;
2482 int bp_explains_trap;
2483 int maybe_internal_trap;
2484 int report_to_gdb;
219f2f23 2485 int trace_event;
c2d6af84 2486 int in_step_range;
bd99dc85 2487
2488 if (debug_threads)
2489 {
2490 debug_enter ();
2491 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2492 }
2493
2494 /* Translate generic target options into linux options. */
2495 options = __WALL;
2496 if (target_options & TARGET_WNOHANG)
2497 options |= WNOHANG;
2498
2499retry:
2500 bp_explains_trap = 0;
2501 trace_event = 0;
c2d6af84 2502 in_step_range = 0;
2503 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2504
2505 /* If we were only supposed to resume one thread, only wait for
2506 that thread - if it's still alive. If it died, however - which
2507 can happen if we're coming from the thread death case below -
2508 then we need to make sure we restart the other threads. We could
2509 pick a thread at random or restart all; restarting all is less
2510 arbitrary. */
2511 if (!non_stop
2512 && !ptid_equal (cont_thread, null_ptid)
2513 && !ptid_equal (cont_thread, minus_one_ptid))
0d62e5e8 2514 {
2515 struct thread_info *thread;
2516
2517 thread = (struct thread_info *) find_inferior_id (&all_threads,
2518 cont_thread);
2519
2520 /* No stepping, no signal - unless one is pending already, of course. */
bd99dc85 2521 if (thread == NULL)
2522 {
2523 struct thread_resume resume_info;
95954743 2524 resume_info.thread = minus_one_ptid;
2525 resume_info.kind = resume_continue;
2526 resume_info.sig = 0;
2bd7c093 2527 linux_resume (&resume_info, 1);
64386c31 2528 }
bd99dc85 2529 else
95954743 2530 ptid = cont_thread;
0d62e5e8 2531 }
da6d8c04 2532
2533 if (ptid_equal (step_over_bkpt, null_ptid))
2534 pid = linux_wait_for_event (ptid, &w, options);
2535 else
2536 {
2537 if (debug_threads)
2538 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2539 target_pid_to_str (step_over_bkpt));
2540 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2541 }
2542
fa96cb38 2543 if (pid == 0)
87ce2a04 2544 {
2545 gdb_assert (target_options & TARGET_WNOHANG);
2546
2547 if (debug_threads)
2548 {
2549 debug_printf ("linux_wait_1 ret = null_ptid, "
2550 "TARGET_WAITKIND_IGNORE\n");
2551 debug_exit ();
2552 }
2553
2554 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2555 return null_ptid;
2556 }
2557 else if (pid == -1)
2558 {
2559 if (debug_threads)
2560 {
2561 debug_printf ("linux_wait_1 ret = null_ptid, "
2562 "TARGET_WAITKIND_NO_RESUMED\n");
2563 debug_exit ();
2564 }
bd99dc85 2565
2566 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2567 return null_ptid;
2568 }
0d62e5e8 2569
fa96cb38 2570 event_child = get_thread_lwp (current_inferior);
0d62e5e8 2571
2572 /* linux_wait_for_event only returns an exit status for the last
2573 child of a process. Report it. */
2574 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2575 {
fa96cb38 2576 if (WIFEXITED (w))
0d62e5e8 2577 {
2578 ourstatus->kind = TARGET_WAITKIND_EXITED;
2579 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2580
fa96cb38 2581 if (debug_threads)
bd99dc85 2582 {
2583 debug_printf ("linux_wait_1 ret = %s, exited with "
2584 "retcode %d\n",
2585 target_pid_to_str (ptid_of (current_inferior)),
2586 WEXITSTATUS (w));
2587 debug_exit ();
bd99dc85 2588 }
2589 }
2590 else
2591 {
2592 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2593 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2594
2595 if (debug_threads)
2596 {
2597 debug_printf ("linux_wait_1 ret = %s, terminated with "
2598 "signal %d\n",
2599 target_pid_to_str (ptid_of (current_inferior)),
2600 WTERMSIG (w));
2601 debug_exit ();
2602 }
0d62e5e8 2603 }
2604
2605 return ptid_of (current_inferior);
2606 }
2607
2608 /* If this event was not handled before, and is not a SIGTRAP, we
2609 report it. SIGILL and SIGSEGV are also treated as traps in case
2610 a breakpoint is inserted at the current PC. If this target does
2611 not support internal breakpoints at all, we also report the
2612 SIGTRAP without further processing; it's of no concern to us. */
2613 maybe_internal_trap
2614 = (supports_breakpoints ()
2615 && (WSTOPSIG (w) == SIGTRAP
2616 || ((WSTOPSIG (w) == SIGILL
2617 || WSTOPSIG (w) == SIGSEGV)
2618 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2619
2620 if (maybe_internal_trap)
2621 {
2622 /* Handle anything that requires bookkeeping before deciding to
2623 report the event or continue waiting. */
2624
2625 /* First check if we can explain the SIGTRAP with an internal
2626 breakpoint, or if we should possibly report the event to GDB.
2627 Do this before anything that may remove or insert a
2628 breakpoint. */
2629 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2630
2631 /* We have a SIGTRAP, possibly a step-over dance has just
2632 finished. If so, tweak the state machine accordingly,
2633 reinsert breakpoints and delete any reinsert (software
2634 single-step) breakpoints. */
2635 step_over_finished = finish_step_over (event_child);
2636
2637 /* Now invoke the callbacks of any internal breakpoints there. */
2638 check_breakpoints (event_child->stop_pc);
2639
2640 /* Handle tracepoint data collecting. This may overflow the
2641 trace buffer, and cause a tracing stop, removing
2642 breakpoints. */
2643 trace_event = handle_tracepoints (event_child);
2644
2645 if (bp_explains_trap)
2646 {
2647 /* If we stepped or ran into an internal breakpoint, we've
2648 already handled it. So next time we resume (from this
2649 PC), we should step over it. */
2650 if (debug_threads)
87ce2a04 2651 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2652
2653 if (breakpoint_here (event_child->stop_pc))
2654 event_child->need_step_over = 1;
2655 }
2656 }
2657 else
2658 {
2659 /* We have some other signal, possibly a step-over dance was in
2660 progress, and it should be cancelled too. */
2661 step_over_finished = finish_step_over (event_child);
2662 }
2663
2664 /* We have all the data we need. Either report the event to GDB, or
2665 resume threads and keep waiting for more. */
2666
2667 /* If we're collecting a fast tracepoint, finish the collection and
2668 move out of the jump pad before delivering a signal. See
2669 linux_stabilize_threads. */
2670
2671 if (WIFSTOPPED (w)
2672 && WSTOPSIG (w) != SIGTRAP
2673 && supports_fast_tracepoints ()
58b4daa5 2674 && agent_loaded_p ())
2675 {
2676 if (debug_threads)
2677 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2678 "to defer or adjust it.\n",
d86d4aaf 2679 WSTOPSIG (w), lwpid_of (current_inferior));
2680
2681 /* Allow debugging the jump pad itself. */
2682 if (current_inferior->last_resume_kind != resume_step
2683 && maybe_move_out_of_jump_pad (event_child, &w))
2684 {
2685 enqueue_one_deferred_signal (event_child, &w);
2686
2687 if (debug_threads)
87ce2a04 2688 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
d86d4aaf 2689 WSTOPSIG (w), lwpid_of (current_inferior));
2690
2691 linux_resume_one_lwp (event_child, 0, 0, NULL);
2692 goto retry;
2693 }
2694 }
219f2f23 2695
2696 if (event_child->collecting_fast_tracepoint)
2697 {
2698 if (debug_threads)
2699 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2700 "Check if we're already there.\n",
d86d4aaf 2701 lwpid_of (current_inferior),
87ce2a04 2702 event_child->collecting_fast_tracepoint);
2703
2704 trace_event = 1;
2705
2706 event_child->collecting_fast_tracepoint
2707 = linux_fast_tracepoint_collecting (event_child, NULL);
2708
2709 if (event_child->collecting_fast_tracepoint != 1)
2710 {
2711 /* No longer need this breakpoint. */
2712 if (event_child->exit_jump_pad_bkpt != NULL)
2713 {
2714 if (debug_threads)
 2715 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
2716 "stopping all threads momentarily.\n");
2717
2718 /* Other running threads could hit this breakpoint.
2719 We don't handle moribund locations like GDB does,
2720 instead we always pause all threads when removing
2721 breakpoints, so that any step-over or
2722 decr_pc_after_break adjustment is always taken
2723 care of while the breakpoint is still
2724 inserted. */
2725 stop_all_lwps (1, event_child);
2726 cancel_breakpoints ();
2727
2728 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2729 event_child->exit_jump_pad_bkpt = NULL;
2730
2731 unstop_all_lwps (1, event_child);
2732
2733 gdb_assert (event_child->suspended >= 0);
2734 }
2735 }
2736
2737 if (event_child->collecting_fast_tracepoint == 0)
2738 {
2739 if (debug_threads)
2740 debug_printf ("fast tracepoint finished "
2741 "collecting successfully.\n");
2742
2743 /* We may have a deferred signal to report. */
2744 if (dequeue_one_deferred_signal (event_child, &w))
2745 {
2746 if (debug_threads)
87ce2a04 2747 debug_printf ("dequeued one signal.\n");
fa593d66 2748 }
3c11dd79 2749 else
fa593d66 2750 {
3c11dd79 2751 if (debug_threads)
87ce2a04 2752 debug_printf ("no deferred signals.\n");
2753
2754 if (stabilizing_threads)
2755 {
2756 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 2757 ourstatus->value.sig = GDB_SIGNAL_0;
2758
2759 if (debug_threads)
2760 {
2761 debug_printf ("linux_wait_1 ret = %s, stopped "
2762 "while stabilizing threads\n",
d86d4aaf 2763 target_pid_to_str (ptid_of (current_inferior)));
2764 debug_exit ();
2765 }
2766
d86d4aaf 2767 return ptid_of (current_inferior);
2768 }
2769 }
2770 }
2771 }
2772
2773 /* Check whether GDB would be interested in this event. */
2774
2775 /* If GDB is not interested in this signal, don't stop other
2776 threads, and don't report it to GDB. Just resume the inferior
2777 right away. We do this for threading-related signals as well as
2778 any that GDB specifically requested we ignore. But never ignore
2779 SIGSTOP if we sent it ourselves, and do not ignore signals when
2780 stepping - they may require special handling to skip the signal
2781 handler. */
2782 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2783 thread library? */
2784 if (WIFSTOPPED (w)
2785 && current_inferior->last_resume_kind != resume_step
2786 && (
1a981360 2787#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2788 (current_process ()->private->thread_db != NULL
2789 && (WSTOPSIG (w) == __SIGRTMIN
2790 || WSTOPSIG (w) == __SIGRTMIN + 1))
2791 ||
2792#endif
2ea28649 2793 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2794 && !(WSTOPSIG (w) == SIGSTOP
2795 && current_inferior->last_resume_kind == resume_stop))))
2796 {
2797 siginfo_t info, *info_p;
2798
2799 if (debug_threads)
87ce2a04 2800 debug_printf ("Ignored signal %d for LWP %ld.\n",
d86d4aaf 2801 WSTOPSIG (w), lwpid_of (current_inferior));
e471f25b 2802
d86d4aaf 2803 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
b8e1b30e 2804 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2805 info_p = &info;
2806 else
2807 info_p = NULL;
2808 linux_resume_one_lwp (event_child, event_child->stepping,
2809 WSTOPSIG (w), info_p);
2810 goto retry;
2811 }
2812
2813 /* Note that all addresses are always "out of the step range" when
2814 there's no range to begin with. */
2815 in_step_range = lwp_in_step_range (event_child);
2816
2817 /* If GDB wanted this thread to single step, and the thread is out
2818 of the step range, we always want to report the SIGTRAP, and let
2819 GDB handle it. Watchpoints should always be reported. So should
2820 signals we can't explain. A SIGTRAP we can't explain could be a
 2821 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
 2822 do, we'll be able to handle GDB breakpoints on top of internal
2823 breakpoints, by handling the internal breakpoint and still
2824 reporting the event to GDB. If we don't, we're out of luck, GDB
2825 won't see the breakpoint hit. */
6bf5e0ba 2826 report_to_gdb = (!maybe_internal_trap
2827 || (current_inferior->last_resume_kind == resume_step
2828 && !in_step_range)
6bf5e0ba 2829 || event_child->stopped_by_watchpoint
c2d6af84 2830 || (!step_over_finished && !in_step_range
493e2a69 2831 && !bp_explains_trap && !trace_event)
9f3a5c85 2832 || (gdb_breakpoint_here (event_child->stop_pc)
2833 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2834 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2835
2836 run_breakpoint_commands (event_child->stop_pc);
2837
2838 /* We found no reason GDB would want us to stop. We either hit one
2839 of our own breakpoints, or finished an internal step GDB
2840 shouldn't know about. */
2841 if (!report_to_gdb)
2842 {
2843 if (debug_threads)
2844 {
2845 if (bp_explains_trap)
87ce2a04 2846 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2847 if (step_over_finished)
87ce2a04 2848 debug_printf ("Step-over finished.\n");
219f2f23 2849 if (trace_event)
87ce2a04 2850 debug_printf ("Tracepoint event.\n");
c2d6af84 2851 if (lwp_in_step_range (event_child))
2852 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2853 paddress (event_child->stop_pc),
2854 paddress (event_child->step_range_start),
2855 paddress (event_child->step_range_end));
2856 }
2857
2858 /* We're not reporting this breakpoint to GDB, so apply the
2859 decr_pc_after_break adjustment to the inferior's regcache
2860 ourselves. */
2861
2862 if (the_low_target.set_pc != NULL)
2863 {
2864 struct regcache *regcache
d86d4aaf 2865 = get_thread_regcache (current_inferior, 1);
2866 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2867 }
2868
2869 /* We may have finished stepping over a breakpoint. If so,
2870 we've stopped and suspended all LWPs momentarily except the
2871 stepping one. This is where we resume them all again. We're
2872 going to keep waiting, so use proceed, which handles stepping
2873 over the next breakpoint. */
6bf5e0ba 2874 if (debug_threads)
87ce2a04 2875 debug_printf ("proceeding all threads.\n");
2876
2877 if (step_over_finished)
2878 unsuspend_all_lwps (event_child);
2879
2880 proceed_all_lwps ();
2881 goto retry;
2882 }
2883
2884 if (debug_threads)
2885 {
8336d594 2886 if (current_inferior->last_resume_kind == resume_step)
2887 {
2888 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 2889 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 2890 else if (!lwp_in_step_range (event_child))
87ce2a04 2891 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 2892 }
6bf5e0ba 2893 if (event_child->stopped_by_watchpoint)
87ce2a04 2894 debug_printf ("Stopped by watchpoint.\n");
8b07ae33 2895 if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 2896 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 2897 if (debug_threads)
87ce2a04 2898 debug_printf ("Hit a non-gdbserver trap event.\n");
2899 }
2900
2901 /* Alright, we're going to report a stop. */
2902
fa593d66 2903 if (!non_stop && !stabilizing_threads)
2904 {
2905 /* In all-stop, stop all threads. */
7984d532 2906 stop_all_lwps (0, NULL);
2907
2908 /* If we're not waiting for a specific LWP, choose an event LWP
2909 from among those that have had events. Giving equal priority
2910 to all LWPs that have had events helps prevent
2911 starvation. */
2912 if (ptid_equal (ptid, minus_one_ptid))
2913 {
2914 event_child->status_pending_p = 1;
2915 event_child->status_pending = w;
2916
2917 select_event_lwp (&event_child);
2918
2919 /* current_inferior and event_child must stay in sync. */
2920 current_inferior = get_lwp_thread (event_child);
2921
2922 event_child->status_pending_p = 0;
2923 w = event_child->status_pending;
2924 }
2925
2926 /* Now that we've selected our final event LWP, cancel any
2927 breakpoints in other LWPs that have hit a GDB breakpoint.
2928 See the comment in cancel_breakpoints_callback to find out
2929 why. */
d86d4aaf 2930 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
fa593d66 2931
 2932      /* If we were doing a step-over, all other threads but the stepping one
2933 had been paused in start_step_over, with their suspend counts
2934 incremented. We don't want to do a full unstop/unpause, because we're
2935 in all-stop mode (so we want threads stopped), but we still need to
2936 unsuspend the other threads, to decrement their `suspended' count
2937 back. */
2938 if (step_over_finished)
2939 unsuspend_all_lwps (event_child);
2940
2941 /* Stabilize threads (move out of jump pads). */
2942 stabilize_threads ();
2943 }
2944 else
2945 {
2946 /* If we just finished a step-over, then all threads had been
2947 momentarily paused. In all-stop, that's fine, we want
2948 threads stopped by now anyway. In non-stop, we need to
2949 re-resume threads that GDB wanted to be running. */
2950 if (step_over_finished)
7984d532 2951 unstop_all_lwps (1, event_child);
2952 }
2953
5b1c542e 2954 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 2955
2956 if (current_inferior->last_resume_kind == resume_stop
2957 && WSTOPSIG (w) == SIGSTOP)
2958 {
2959 /* A thread that has been requested to stop by GDB with vCont;t,
2960 and it stopped cleanly, so report as SIG0. The use of
2961 SIGSTOP is an implementation detail. */
a493e3e2 2962 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 2963 }
2964 else if (current_inferior->last_resume_kind == resume_stop
2965 && WSTOPSIG (w) != SIGSTOP)
2966 {
2967 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 2968 but it stopped for other reasons. */
2ea28649 2969 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2970 }
2971 else
2972 {
2ea28649 2973 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2974 }
2975
2976 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2977
bd99dc85 2978 if (debug_threads)
2979 {
2980 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
d86d4aaf 2981 target_pid_to_str (ptid_of (current_inferior)),
2982 ourstatus->kind, ourstatus->value.sig);
2983 debug_exit ();
2984 }
bd99dc85 2985
d86d4aaf 2986 return ptid_of (current_inferior);
2987}
2988
2989/* Get rid of any pending event in the pipe. */
2990static void
2991async_file_flush (void)
2992{
2993 int ret;
2994 char buf;
2995
2996 do
2997 ret = read (linux_event_pipe[0], &buf, 1);
2998 while (ret >= 0 || (ret == -1 && errno == EINTR));
2999}
3000
3001/* Put something in the pipe, so the event loop wakes up. */
3002static void
3003async_file_mark (void)
3004{
3005 int ret;
3006
3007 async_file_flush ();
3008
3009 do
3010 ret = write (linux_event_pipe[1], "+", 1);
3011 while (ret == 0 || (ret == -1 && errno == EINTR));
3012
3013 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3014 be awakened anyway. */
3015}
3016
3017static ptid_t
3018linux_wait (ptid_t ptid,
3019 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3020{
95954743 3021 ptid_t event_ptid;
bd99dc85 3022
3023 /* Flush the async file first. */
3024 if (target_is_async_p ())
3025 async_file_flush ();
3026
95954743 3027 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3028
3029 /* If at least one stop was reported, there may be more. A single
3030 SIGCHLD can signal more than one child stop. */
3031 if (target_is_async_p ()
3032 && (target_options & TARGET_WNOHANG) != 0
95954743 3033 && !ptid_equal (event_ptid, null_ptid))
3034 async_file_mark ();
3035
3036 return event_ptid;
3037}
3038
c5f62d5f 3039/* Send a signal to an LWP. */
3040
3041static int
a1928bad 3042kill_lwp (unsigned long lwpid, int signo)
fd500816 3043{
3044 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3045 fails, then we are not using nptl threads and we should be using kill. */
fd500816 3046
3047#ifdef __NR_tkill
3048 {
3049 static int tkill_failed;
fd500816 3050
3051 if (!tkill_failed)
3052 {
3053 int ret;
3054
3055 errno = 0;
3056 ret = syscall (__NR_tkill, lwpid, signo);
3057 if (errno != ENOSYS)
3058 return ret;
3059 tkill_failed = 1;
3060 }
3061 }
3062#endif
3063
3064 return kill (lwpid, signo);
3065}
3066
3067void
3068linux_stop_lwp (struct lwp_info *lwp)
3069{
3070 send_sigstop (lwp);
3071}
3072
0d62e5e8 3073static void
02fc4de7 3074send_sigstop (struct lwp_info *lwp)
0d62e5e8 3075{
bd99dc85 3076 int pid;
0d62e5e8 3077
d86d4aaf 3078 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3079
0d62e5e8
DJ
3080 /* If we already have a pending stop signal for this process, don't
3081 send another. */
54a0b537 3082 if (lwp->stop_expected)
0d62e5e8 3083 {
ae13219e 3084 if (debug_threads)
87ce2a04 3085 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3086
3087 return;
3088 }
3089
3090 if (debug_threads)
87ce2a04 3091 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3092
d50171e4 3093 lwp->stop_expected = 1;
bd99dc85 3094 kill_lwp (pid, SIGSTOP);
3095}
3096
3097static int
3098send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3099{
3100 struct thread_info *thread = (struct thread_info *) entry;
3101 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3102
3103 /* Ignore EXCEPT. */
3104 if (lwp == except)
3105 return 0;
3106
02fc4de7 3107 if (lwp->stopped)
7984d532 3108 return 0;
3109
3110 send_sigstop (lwp);
3111 return 0;
3112}
3113
3114/* Increment the suspend count of an LWP, and stop it, if not stopped
3115 yet. */
3116static int
3117suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3118 void *except)
3119{
3120 struct thread_info *thread = (struct thread_info *) entry;
3121 struct lwp_info *lwp = get_thread_lwp (thread);
3122
3123 /* Ignore EXCEPT. */
3124 if (lwp == except)
3125 return 0;
3126
3127 lwp->suspended++;
3128
3129 return send_sigstop_callback (entry, except);
3130}
3131
3132static void
3133mark_lwp_dead (struct lwp_info *lwp, int wstat)
3134{
3135 /* It's dead, really. */
3136 lwp->dead = 1;
3137
3138 /* Store the exit status for later. */
3139 lwp->status_pending_p = 1;
3140 lwp->status_pending = wstat;
3141
3142 /* Prevent trying to stop it. */
3143 lwp->stopped = 1;
3144
3145 /* No further stops are expected from a dead lwp. */
3146 lwp->stop_expected = 0;
3147}
3148
3149/* Wait for all children to stop for the SIGSTOPs we just queued. */
3150
0d62e5e8 3151static void
fa96cb38 3152wait_for_sigstop (void)
0d62e5e8 3153{
bd99dc85 3154 struct thread_info *saved_inferior;
95954743 3155 ptid_t saved_tid;
3156 int wstat;
3157 int ret;
3158
3159 saved_inferior = current_inferior;
bd99dc85 3160 if (saved_inferior != NULL)
80894984 3161 saved_tid = saved_inferior->entry.id;
bd99dc85 3162 else
95954743 3163 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3164
d50171e4 3165 if (debug_threads)
fa96cb38 3166 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3167
3168 /* Passing NULL_PTID as filter indicates we want all events to be
3169 left pending. Eventually this returns when there are no
3170 unwaited-for children left. */
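  /* (Nothing matches NULL_PTID, so linux_wait_for_event_filtered
     leaves every stop pending on its LWP instead of returning it, and
     only returns, with -1, once no resumed unwaited-for children
     remain; hence the assert below.)  */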
3171 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3172 &wstat, __WALL);
3173 gdb_assert (ret == -1);
0d62e5e8 3174
bd99dc85 3175 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3176 current_inferior = saved_inferior;
3177 else
3178 {
3179 if (debug_threads)
87ce2a04 3180 debug_printf ("Previously current thread died.\n");
0d62e5e8 3181
3182 if (non_stop)
3183 {
3184 /* We can't change the current inferior behind GDB's back,
3185 otherwise, a subsequent command may apply to the wrong
3186 process. */
3187 current_inferior = NULL;
3188 }
3189 else
3190 {
3191 /* Set a valid thread as current. */
3192 set_desired_inferior (0);
3193 }
3194 }
3195}
3196
3197/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3198 move it out, because we need to report the stop event to GDB. For
3199 example, if the user puts a breakpoint in the jump pad, it's
3200 because she wants to debug it. */
3201
3202static int
3203stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3204{
3205 struct thread_info *thread = (struct thread_info *) entry;
3206 struct lwp_info *lwp = get_thread_lwp (thread);
3207
3208 gdb_assert (lwp->suspended == 0);
3209 gdb_assert (lwp->stopped);
3210
3211 /* Allow debugging the jump pad, gdb_collect, etc.. */
3212 return (supports_fast_tracepoints ()
58b4daa5 3213 && agent_loaded_p ()
3214 && (gdb_breakpoint_here (lwp->stop_pc)
3215 || lwp->stopped_by_watchpoint
3216 || thread->last_resume_kind == resume_step)
3217 && linux_fast_tracepoint_collecting (lwp, NULL));
3218}
3219
3220static void
3221move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3222{
3223 struct thread_info *thread = (struct thread_info *) entry;
3224 struct lwp_info *lwp = get_thread_lwp (thread);
3225 int *wstat;
3226
3227 gdb_assert (lwp->suspended == 0);
3228 gdb_assert (lwp->stopped);
3229
3230 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3231
3232 /* Allow debugging the jump pad, gdb_collect, etc. */
3233 if (!gdb_breakpoint_here (lwp->stop_pc)
3234 && !lwp->stopped_by_watchpoint
3235 && thread->last_resume_kind != resume_step
3236 && maybe_move_out_of_jump_pad (lwp, wstat))
3237 {
3238 if (debug_threads)
87ce2a04 3239 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3240 lwpid_of (thread));
3241
3242 if (wstat)
3243 {
3244 lwp->status_pending_p = 0;
3245 enqueue_one_deferred_signal (lwp, wstat);
3246
3247 if (debug_threads)
3248 debug_printf ("Signal %d for LWP %ld deferred "
3249 "(in jump pad)\n",
d86d4aaf 3250 WSTOPSIG (*wstat), lwpid_of (thread));
3251 }
3252
3253 linux_resume_one_lwp (lwp, 0, 0, NULL);
3254 }
3255 else
3256 lwp->suspended++;
3257}
3258
3259static int
3260lwp_running (struct inferior_list_entry *entry, void *data)
3261{
3262 struct thread_info *thread = (struct thread_info *) entry;
3263 struct lwp_info *lwp = get_thread_lwp (thread);
3264
3265 if (lwp->dead)
3266 return 0;
3267 if (lwp->stopped)
3268 return 0;
3269 return 1;
3270}
3271
3272/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3273 If SUSPEND, then also increase the suspend count of every LWP,
3274 except EXCEPT. */
3275
0d62e5e8 3276static void
7984d532 3277stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3278{
3279 /* Should not be called recursively. */
3280 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3281
3282 if (debug_threads)
3283 {
3284 debug_enter ();
3285 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3286 suspend ? "stop-and-suspend" : "stop",
3287 except != NULL
d86d4aaf 3288 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3289 : "none");
3290 }
3291
3292 stopping_threads = (suspend
3293 ? STOPPING_AND_SUSPENDING_THREADS
3294 : STOPPING_THREADS);
3295
3296 if (suspend)
d86d4aaf 3297 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3298 else
d86d4aaf 3299 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3300 wait_for_sigstop ();
bde24c0a 3301 stopping_threads = NOT_STOPPING_THREADS;
3302
3303 if (debug_threads)
3304 {
3305 debug_printf ("stop_all_lwps done, setting stopping_threads "
3306 "back to !stopping\n");
3307 debug_exit ();
3308 }
3309}
3310
3311/* Resume execution of the inferior process.
3312 If STEP is nonzero, single-step it.
3313 If SIGNAL is nonzero, give it that signal. */
3314
ce3a066d 3315static void
2acc282a 3316linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 3317 int step, int signal, siginfo_t *info)
da6d8c04 3318{
d86d4aaf 3319 struct thread_info *thread = get_lwp_thread (lwp);
0d62e5e8 3320 struct thread_info *saved_inferior;
fa593d66 3321 int fast_tp_collecting;
0d62e5e8 3322
54a0b537 3323 if (lwp->stopped == 0)
3324 return;
3325
3326 fast_tp_collecting = lwp->collecting_fast_tracepoint;
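  /* Assumed encoding of COLLECTING_FAST_TRACEPOINT, matching its uses
     below: 0 means not collecting; 1 means collecting and due to
     leave the jump pad via the exit-jump-pad breakpoint; 2 means
     collecting and due to leave it by single-stepping.  */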
3327
3328 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3329
3330 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3331 user used the "jump" command, or "set $pc = foo"). */
3332 if (lwp->stop_pc != get_pc (lwp))
3333 {
3334 /* Collecting 'while-stepping' actions doesn't make sense
3335 anymore. */
d86d4aaf 3336 release_while_stepping_state_list (thread);
3337 }
3338
3339 /* If we have pending signals or status, and a new signal, enqueue the
3340 signal. Also enqueue the signal if we are waiting to reinsert a
3341 breakpoint; it will be picked up again below. */
3342 if (signal != 0
3343 && (lwp->status_pending_p
3344 || lwp->pending_signals != NULL
3345 || lwp->bp_reinsert != 0
3346 || fast_tp_collecting))
3347 {
3348 struct pending_signals *p_sig;
bca929d3 3349 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3350 p_sig->prev = lwp->pending_signals;
0d62e5e8 3351 p_sig->signal = signal;
3352 if (info == NULL)
3353 memset (&p_sig->info, 0, sizeof (siginfo_t));
3354 else
3355 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3356 lwp->pending_signals = p_sig;
3357 }
3358
3359 if (lwp->status_pending_p)
3360 {
3361 if (debug_threads)
3362 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3363 " has pending status\n",
d86d4aaf 3364 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3365 lwp->stop_expected ? "expected" : "not expected");
3366 return;
3367 }
3368
3369 saved_inferior = current_inferior;
d86d4aaf 3370 current_inferior = thread;
3371
3372 if (debug_threads)
87ce2a04 3373 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 3374 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3375 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3376
3377 /* This bit needs some thinking about. If we get a signal that
3378 we must report while a single-step reinsert is still pending,
3379 we often end up resuming the thread. It might be better to
3380 (ew) allow a stack of pending events; then we could be sure that
3381 the reinsert happened right away and not lose any signals.
3382
3383 Making this stack would also shrink the window in which breakpoints are
54a0b537 3384 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3385 complete correctness, so it won't solve that problem. It may be
3386 worthwhile just to solve this one, however. */
54a0b537 3387 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3388 {
3389 if (debug_threads)
87ce2a04
DE
3390 debug_printf (" pending reinsert at 0x%s\n",
3391 paddress (lwp->bp_reinsert));
d50171e4 3392
85e00e85 3393 if (can_hardware_single_step ())
d50171e4 3394 {
fa593d66
PA
3395 if (fast_tp_collecting == 0)
3396 {
3397 if (step == 0)
3398 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3399 if (lwp->suspended)
3400 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3401 lwp->suspended);
3402 }
d50171e4
PA
3403
3404 step = 1;
3405 }
0d62e5e8
DJ
3406
3407 /* Postpone any pending signal. It was enqueued above. */
3408 signal = 0;
3409 }
3410
fa593d66
PA
3411 if (fast_tp_collecting == 1)
3412 {
3413 if (debug_threads)
87ce2a04
DE
3414 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3415 " (exit-jump-pad-bkpt)\n",
d86d4aaf 3416 lwpid_of (thread));
fa593d66
PA
3417
3418 /* Postpone any pending signal. It was enqueued above. */
3419 signal = 0;
3420 }
3421 else if (fast_tp_collecting == 2)
3422 {
3423 if (debug_threads)
87ce2a04
DE
3424 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3425 " single-stepping\n",
d86d4aaf 3426 lwpid_of (thread));
fa593d66
PA
3427
3428 if (can_hardware_single_step ())
3429 step = 1;
3430 else
3431 fatal ("moving out of jump pad single-stepping"
3432 " not implemented on this target");
3433
3434 /* Postpone any pending signal. It was enqueued above. */
3435 signal = 0;
3436 }
3437
219f2f23
PA
3438 /* If we have while-stepping actions in this thread set it stepping.
3439 If we have a signal to deliver, it may or may not be set to
3440 SIG_IGN, we don't know. Assume so, and allow collecting
3441 while-stepping into a signal handler. A possible smart thing to
3442 do would be to set an internal breakpoint at the signal return
3443 address, continue, and carry on catching this while-stepping
3444 action only when that breakpoint is hit. A future
3445 enhancement. */
d86d4aaf 3446 if (thread->while_stepping != NULL
219f2f23
PA
3447 && can_hardware_single_step ())
3448 {
3449 if (debug_threads)
87ce2a04 3450 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 3451 lwpid_of (thread));
219f2f23
PA
3452 step = 1;
3453 }
3454
aa691b87 3455 if (debug_threads && the_low_target.get_pc != NULL)
0d62e5e8 3456 {
442ea881
PA
3457 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3458 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
87ce2a04 3459 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
0d62e5e8
DJ
3460 }
3461
fa593d66
PA
3462 /* If we have pending signals, consume one unless we are trying to
3463 reinsert a breakpoint or we're trying to finish a fast tracepoint
3464 collect. */
3465 if (lwp->pending_signals != NULL
3466 && lwp->bp_reinsert == 0
3467 && fast_tp_collecting == 0)
0d62e5e8
DJ
3468 {
3469 struct pending_signals **p_sig;
3470
54a0b537 3471 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3472 while ((*p_sig)->prev != NULL)
3473 p_sig = &(*p_sig)->prev;
3474
3475 signal = (*p_sig)->signal;
32ca6d61 3476 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 3477 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 3478 &(*p_sig)->info);
32ca6d61 3479
0d62e5e8
DJ
3480 free (*p_sig);
3481 *p_sig = NULL;
3482 }
3483
aa5ca48f
DE
3484 if (the_low_target.prepare_to_resume != NULL)
3485 the_low_target.prepare_to_resume (lwp);
3486
d86d4aaf 3487 regcache_invalidate_thread (thread);
da6d8c04 3488 errno = 0;
54a0b537 3489 lwp->stopped = 0;
c3adc08c 3490 lwp->stopped_by_watchpoint = 0;
54a0b537 3491 lwp->stepping = step;
d86d4aaf 3492 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 3493 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
3494 /* Coerce to a uintptr_t first to avoid potential gcc warning
3495 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 3496 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8
DJ
3497
3498 current_inferior = saved_inferior;
da6d8c04 3499 if (errno)
3221518c
UW
3500 {
3501 /* ESRCH from ptrace either means that the thread was already
3502 running (an error) or that it is gone (a race condition). If
3503 it's gone, we will get a notification the next time we wait,
3504 so we can ignore the error. We could differentiate these
3505 two, but it's tricky without waiting; the thread still exists
3506 as a zombie, so sending it signal 0 would succeed. So just
3507 ignore ESRCH. */
3508 if (errno == ESRCH)
3509 return;
3510
3511 perror_with_name ("ptrace");
3512 }
da6d8c04
DJ
3513}
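
/* Editor's note: an illustrative, self-contained sketch of the
   pending-signal queue discipline used above.  New signals are pushed
   at the head of the singly linked list (see the enqueue in
   linux_resume_one_lwp), while the consumer walks to the tail, so
   delivery order stays FIFO even though the list grows at the head.
   The example_* names are hypothetical; kept out of the build with
   #if 0.  */
#if 0
struct example_sig { struct example_sig *prev; int signal; };

static void
example_push (struct example_sig **head, int sig)
{
  struct example_sig *p = xmalloc (sizeof (*p));
  p->prev = *head;              /* Newest element becomes the head.  */
  p->signal = sig;
  *head = p;
}

static int
example_pop_oldest (struct example_sig **head)
{
  struct example_sig **p = head;
  int sig;

  while ((*p)->prev != NULL)    /* Walk to the tail (oldest entry).  */
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif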

struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};

/* This function is called once per thread via find_inferior.
   ARG is a pointer to a thread_resume_array struct.
   We look up the thread specified by ENTRY in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int ndx;
  struct thread_resume_array *r;

  r = arg;

  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
          || ptid_equal (ptid, entry->id)
          /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
             of PID'.  */
          || (ptid_get_pid (ptid) == pid_of (thread)
              && (ptid_is_pid (ptid)
                  || ptid_get_lwp (ptid) == -1)))
        {
          if (r->resume[ndx].kind == resume_stop
              && thread->last_resume_kind == resume_stop)
            {
              if (debug_threads)
                debug_printf ("already %s LWP %ld at GDB's request\n",
                              (thread->last_status.kind
                               == TARGET_WAITKIND_STOPPED)
                              ? "stopped"
                              : "stopping",
                              lwpid_of (thread));

              continue;
            }

          lwp->resume = &r->resume[ndx];
          thread->last_resume_kind = lwp->resume->kind;

          lwp->step_range_start = lwp->resume->step_range_start;
          lwp->step_range_end = lwp->resume->step_range_end;

          /* If we had a deferred signal to report, dequeue one now.
             This can happen if LWP gets more than one signal while
             trying to get out of a jump pad.  */
          if (lwp->stopped
              && !lwp->status_pending_p
              && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
            {
              lwp->status_pending_p = 1;

              if (debug_threads)
                debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
                              "leaving status pending.\n",
                              WSTOPSIG (lwp->status_pending),
                              lwpid_of (thread));
            }

          return 0;
        }
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}
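
/* Editor's note: a minimal sketch of the ptid matching rule applied in
   the loop above -- a vCont action targets a thread if it is the
   wildcard (-1), an exact thread id, or a process-wide id ('pPID' or
   'pPID.-1').  example_action_matches is a hypothetical name; kept out
   of the build with #if 0.  */
#if 0
static int
example_action_matches (ptid_t action, struct thread_info *thread)
{
  return (ptid_equal (action, minus_one_ptid)       /* vCont wildcard.  */
          || ptid_equal (action, ptid_of (thread))  /* Exact thread.  */
          || (ptid_get_pid (action) == pid_of (thread) /* Whole process.  */
              && (ptid_is_pid (action)
                  || ptid_get_lwp (action) == -1)));
}
#endif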

/* find_inferior callback for linux_resume.
   Set *FLAG_P if this lwp has an interesting status pending.  */

static int
resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* LWPs which will not be resumed are not interesting, because
     we might not wait for them next time through linux_wait.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->status_pending_p)
    * (int *) flag_p = 1;

  return 0;
}

/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
        debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
                      lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
        debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
                      " stopped\n",
                      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
        debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
                      lwpid_of (thread));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
        debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
        debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
                      " status.\n",
                      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
        debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
                      "Old stop_pc was 0x%s, PC is now 0x%s\n",
                      lwpid_of (thread),
                      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
         though.  If the condition is being evaluated on the target's
         side and it evaluates to false, step over this breakpoint as
         well.  */
      if (gdb_breakpoint_here (pc)
          && gdb_condition_true_at_breakpoint (pc)
          && gdb_no_commands_at_breakpoint (pc))
        {
          if (debug_threads)
            debug_printf ("Need step over [LWP %ld]? yes, but found"
                          " GDB breakpoint at 0x%s; skipping step over\n",
                          lwpid_of (thread), paddress (pc));

          current_inferior = saved_inferior;
          return 0;
        }
      else
        {
          if (debug_threads)
            debug_printf ("Need step over [LWP %ld]? yes, "
                          "found breakpoint at 0x%s\n",
                          lwpid_of (thread), paddress (pc));

          /* We've found an lwp that needs stepping over --- return 1 so
             that find_inferior stops looking.  */
          current_inferior = saved_inferior;

          /* If the step over is cancelled, this is set again.  */
          lwp->need_step_over = 0;
          return 1;
        }
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
                  " at 0x%s\n",
                  lwpid_of (thread), paddress (pc));

  return 0;
}

/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
                  lwpid_of (thread));

  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_inferior = current_inferior;
  current_inferior = thread;

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}

/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on non hardware single-step targets.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
        debug_printf ("Finished step over.\n");

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
         may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
         longer needed.  We don't have to worry about other threads
         hitting this trap, and later not being able to explain it,
         because we were stepping over a breakpoint, and we hold all
         threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
        delete_reinsert_breakpoints ();

      step_over_bkpt = null_ptid;
      return 1;
    }
  else
    return 0;
}
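
/* Editor's note: an illustrative sketch of the step-over life cycle
   implemented by start_step_over/finish_step_over above, condensed
   into one place.  This is a paraphrase for readability, not built
   code (hence #if 0); the driver loop in linux_wait is what really
   sequences these steps.  */
#if 0
static void
example_step_over_cycle (struct lwp_info *lwp)
{
  CORE_ADDR pc = get_pc (lwp);

  stop_all_lwps (1, lwp);                  /* 1. Park every other thread.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);            /* 2. Lift the breakpoint.  */
  linux_resume_one_lwp (lwp, 1, 0, NULL);  /* 3. Hardware single-step.  */
  /* ... linux_wait reports the step completion ...  */
  finish_step_over (lwp);                  /* 4. Put the breakpoint back.  */
  unstop_all_lwps (1, lwp);                /* 5. Let the world run again.  */
}
#endif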

/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
        debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
        {
          if (debug_threads)
            debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

          /* Stop the thread, and wait for the event asynchronously,
             through the event loop.  */
          send_sigstop (lwp);
        }
      else
        {
          if (debug_threads)
            debug_printf ("already stopped LWP %ld\n",
                          lwpid_of (thread));

          /* The LWP may have been stopped in an internal event that
             was not meant to be notified back to GDB (e.g., gdbserver
             breakpoint), so we should be reporting a stop event in
             this case too.  */

          /* If the thread already has a pending SIGSTOP, this is a
             no-op.  Otherwise, something later will presumably resume
             the thread and this will cause it to cancel any pending
             operation, due to last_resume_kind == resume_stop.  If
             the thread already has a pending status to report, we
             will still report it the next time we wait - see
             status_pending_p_callback.  */

          /* If we already have a pending signal to report, then
             there's no need to queue a SIGSTOP, as this means we're
             midway through moving the LWP out of the jumppad, and we
             will report the pending signal as soon as that is
             finished.  */
          if (lwp->pending_signals_to_report == NULL)
            send_sigstop (lwp);
        }

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we base this decision on whether *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
        debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
        debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
        {
          struct pending_signals *p_sig;
          p_sig = xmalloc (sizeof (*p_sig));
          p_sig->prev = lwp->pending_signals;
          p_sig->signal = lwp->resume->sig;
          memset (&p_sig->info, 0, sizeof (siginfo_t));

          /* If this is the same signal we were previously stopped by,
             make sure to queue its siginfo.  We can ignore the return
             value of ptrace; if it fails, we'll skip
             PTRACE_SETSIGINFO.  */
          if (WIFSTOPPED (lwp->last_status)
              && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
            ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
                    &p_sig->info);

          lwp->pending_signals = p_sig;
        }
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
                                              need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
        debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
        debug_printf ("Not resuming, all-stop and found "
                      "an LWP with pending status\n");
      else
        debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}

/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
        debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
        debug_printf ("   client wants LWP %ld to remain stopped\n",
                      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
        debug_printf ("   LWP %ld has pending status, leaving stopped\n",
                      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
        debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
         last_status.kind check above would catch it, and we wouldn't
         reach here).  This LWP may have been momentarily paused by a
         stop_all_lwps call while handling, for example, another LWP's
         step-over.  In that case, the pending expected SIGSTOP signal
         that was queued at vCont;t handling time will have already
         been consumed by wait_for_sigstop, and so we need to requeue
         another one here.  Note that if the LWP already has a SIGSTOP
         pending, this is a no-op.  */

      if (debug_threads)
        debug_printf ("Client wants LWP %ld to stop.  "
                      "Making sure it has a SIGSTOP pending\n",
                      lwpid_of (thread));

      send_sigstop (lwp);
    }

  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp == except)
    return 0;

  lwp->suspended--;
  gdb_assert (lwp->suspended >= 0);

  return proceed_one_lwp (entry, except);
}

/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
        = (struct thread_info *) find_inferior (&all_threads,
                                                need_step_over_p, NULL);

      if (need_step_over != NULL)
        {
          if (debug_threads)
            debug_printf ("proceed_all_lwps: found "
                          "thread %ld needing a step-over\n",
                          lwpid_of (need_step_over));

          start_step_over (get_thread_lwp (need_step_over));
          return;
        }
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  find_inferior (&all_threads, proceed_one_lwp, NULL);
}

/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (int unsuspend, struct lwp_info *except)
{
  if (debug_threads)
    {
      debug_enter ();
      if (except)
        debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
                      lwpid_of (get_lwp_thread (except)));
      else
        debug_printf ("unstopping all lwps\n");
    }

  if (unsuspend)
    find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
  else
    find_inferior (&all_threads, proceed_one_lwp, except);

  if (debug_threads)
    {
      debug_printf ("unstop_all_lwps done\n");
      debug_exit ();
    }
}
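
/* Editor's note: a minimal sketch of how stop_all_lwps and
   unstop_all_lwps bracket a critical section, as start_step_over and
   linux_wait do.  The suspend flag of the two calls must match so the
   per-LWP suspend counts stay balanced.  Hypothetical helper name;
   kept out of the build with #if 0.  */
#if 0
static void
example_with_all_threads_stopped (struct lwp_info *interesting_lwp)
{
  stop_all_lwps (1, interesting_lwp);    /* Stop and suspend the rest.  */
  /* ... mutate breakpoints or inspect memory safely here ...  */
  unstop_all_lwps (1, interesting_lwp);  /* Unsuspend and resume them.  */
}
#endif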


#ifdef HAVE_LINUX_REGSETS

#define use_linux_regsets 1

/* Returns true if REGSET has been disabled.  */

static int
regset_disabled (struct regsets_info *info, struct regset_info *regset)
{
  return (info->disabled_regsets != NULL
          && info->disabled_regsets[regset - info->regsets]);
}

/* Disable REGSET.  */

static void
disable_regset (struct regsets_info *info, struct regset_info *regset)
{
  int dr_offset;

  dr_offset = regset - info->regsets;
  if (info->disabled_regsets == NULL)
    info->disabled_regsets = xcalloc (1, info->num_regsets);
  info->disabled_regsets[dr_offset] = 1;
}

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
                                  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = regsets_info->regsets;

  pid = lwpid_of (current_inferior);
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
        {
          regset ++;
          continue;
        }

      buf = xmalloc (regset->size);

      nt_type = regset->nt_type;
      if (nt_type)
        {
          iov.iov_base = buf;
          iov.iov_len = regset->size;
          data = (void *) &iov;
        }
      else
        data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
                    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
        {
          if (errno == EIO)
            {
              /* If we get EIO on a regset, do not try it again for
                 this process mode.  */
              disable_regset (regsets_info, regset);
              free (buf);
              continue;
            }
          else
            {
              char s[256];
              sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
                       pid);
              perror (s);
            }
        }
      else if (regset->type == GENERAL_REGS)
        saw_general_regs = 1;
      regset->store_function (regcache, buf);
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
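
/* Editor's note: a self-contained sketch of the iovec-based
   PTRACE_GETREGSET call that the loop above issues when a regset has a
   non-zero nt_type.  The kernel trims iov_len to what it actually
   wrote.  Assumes a Linux host whose headers define PTRACE_GETREGSET;
   kept out of the build with #if 0.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>                /* NT_PRSTATUS */

static long
example_fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* The third argument is the note type, not an address, for this
     request.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif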

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
                                  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = regsets_info->regsets;

  pid = lwpid_of (current_inferior);
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
        {
          regset ++;
          continue;
        }

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
         in case there are any items in the kernel's regset that are
         not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
        {
          iov.iov_base = buf;
          iov.iov_len = regset->size;
          data = (void *) &iov;
        }
      else
        data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
                    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
        {
          /* Then overlay our cached registers on that.  */
          regset->fill_function (regcache, buf);

          /* Only now do we write the register set.  */
#ifndef __sparc__
          res = ptrace (regset->set_request, pid,
                        (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
          res = ptrace (regset->set_request, pid, data, nt_type);
#endif
        }

      if (res < 0)
        {
          if (errno == EIO)
            {
              /* If we get EIO on a regset, do not try it again for
                 this process mode.  */
              disable_regset (regsets_info, regset);
              free (buf);
              continue;
            }
          else if (errno == ESRCH)
            {
              /* At this point, ESRCH should mean the process is
                 already gone, in which case we simply ignore attempts
                 to change its registers.  See also the related
                 comment in linux_resume_one_lwp.  */
              free (buf);
              return 0;
            }
          else
            {
              perror ("Warning: ptrace(regsets_store_inferior_registers)");
            }
        }
      else if (regset->type == GENERAL_REGS)
        saw_general_regs = 1;
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

#else /* !HAVE_LINUX_REGSETS */

#define use_linux_regsets 0
#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
#define regsets_store_inferior_registers(regsets_info, regcache) 1

#endif

/* Return 1 if register REGNO is supported by one of the regset ptrace
   calls or 0 if it has to be transferred individually.  */

static int
linux_register_in_regsets (const struct regs_info *regs_info, int regno)
{
  unsigned char mask = 1 << (regno % 8);
  size_t index = regno / 8;

  return (use_linux_regsets
          && (regs_info->regset_bitmap == NULL
              || (regs_info->regset_bitmap[index] & mask) != 0));
}

#ifdef HAVE_LINUX_USRREGS

int
register_addr (const struct usrregs_info *usrregs, int regnum)
{
  int addr;

  if (regnum < 0 || regnum >= usrregs->num_regs)
    error ("Invalid register number %d.", regnum);

  addr = usrregs->regmap[regnum];

  return addr;
}

/* Fetch one register.  */
static void
fetch_register (const struct usrregs_info *usrregs,
                struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
           + sizeof (PTRACE_XFER_TYPE) - 1)
          & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_inferior);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
        ptrace (PTRACE_PEEKUSER, pid,
                /* Coerce to a uintptr_t first to avoid potential gcc warning
                   of coercing an 8 byte integer to a 4 byte pointer.  */
                (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
        error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
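
/* Editor's note: a worked example of the word-alignment arithmetic in
   fetch_register/store_register above.  Rounding SIZE up to a multiple
   of sizeof (PTRACE_XFER_TYPE) uses the identity (x + n - 1) & -n for
   power-of-two n.  Hypothetical values; kept out of the build with
   #if 0.  */
#if 0
#include <assert.h>

static void
example_round_up (void)
{
  size_t n = 8;                         /* e.g., sizeof (PTRACE_XFER_TYPE).  */

  assert (((10 + n - 1) & -n) == 16);   /* 10 bytes -> two 8-byte words.  */
  assert (((16 + n - 1) & -n) == 16);   /* Already aligned: unchanged.  */
  assert ((( 1 + n - 1) & -n) ==  8);   /* Tiny registers use one word.  */
}
#endif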

/* Store one register.  */
static void
store_register (const struct usrregs_info *usrregs,
                struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
           + sizeof (PTRACE_XFER_TYPE) - 1)
          & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_inferior);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
              /* Coerce to a uintptr_t first to avoid potential gcc warning
                 about coercing an 8 byte integer to a 4 byte pointer.  */
              (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
              (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
        {
          /* At this point, ESRCH should mean the process is
             already gone, in which case we simply ignore attempts
             to change its registers.  See also the related
             comment in linux_resume_one_lwp.  */
          if (errno == ESRCH)
            return;

          if ((*the_low_target.cannot_store_register) (regno) == 0)
            error ("writing register %d: %s", regno, strerror (errno));
        }
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}

/* Fetch all registers, or just one, from the child process.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been retrieved by regsets_fetch_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_fetch_inferior_registers (const struct regs_info *regs_info,
                              struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
        if (all || !linux_register_in_regsets (regs_info, regno))
          fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been saved by regsets_store_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_store_inferior_registers (const struct regs_info *regs_info,
                              struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
        if (all || !linux_register_in_regsets (regs_info, regno))
          store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
}

#else /* !HAVE_LINUX_USRREGS */

#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)

#endif


void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      if (the_low_target.fetch_register != NULL
          && regs_info->usrregs != NULL)
        for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
          (*the_low_target.fetch_register) (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
        usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (the_low_target.fetch_register != NULL
          && (*the_low_target.fetch_register) (regcache, regno))
        return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
        all = regsets_fetch_inferior_registers (regs_info->regsets_info,
                                                regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
        usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}

void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
                                              regcache);
      if (regs_info->usrregs != NULL)
        usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
        all = regsets_store_inferior_registers (regs_info->regsets_info,
                                                regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
        usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}


/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_inferior);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
         thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
        goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
         supports it (only one syscall), and it's 64-bit safe even on
         32-bit platforms (for instance, SPARC debugging a SPARC64
         application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
        bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
        return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
        {
          memaddr += bytes;
          myaddr += bytes;
          len -= bytes;
        }
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
           / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
         about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
                          (PTRACE_TYPE_ARG3) (uintptr_t) addr,
                          (PTRACE_TYPE_ARG4) 0);
      if (errno)
        break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
              (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
              i < len ? i : len);
    }

  return ret;
}
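
/* Editor's note: a self-contained sketch of the fast path above --
   reading another process's memory through /proc/<pid>/mem with
   pread(2), which seeks and reads in one syscall.  Error handling is
   reduced to returning -1.  Kept out of the build with #if 0.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
example_proc_mem_read (pid_t pid, unsigned long addr,
                       void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof (path), "/proc/%d/mem", (int) pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);  /* One syscall, no lseek.  */
  close (fd);
  return n;
}
#endif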

/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
    alloca (count * sizeof (PTRACE_XFER_TYPE));

  int pid = lwpid_of (current_inferior);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      unsigned int val = * (unsigned int *) myaddr;
      if (len == 1)
        val = val & 0xff;
      else if (len == 2)
        val = val & 0xffff;
      else if (len == 3)
        val = val & 0xffffff;
      debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
                    val, (long) memaddr);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
                      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
                      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
        = ptrace (PTRACE_PEEKTEXT, pid,
                  /* Coerce to a uintptr_t first to avoid potential gcc warning
                     about coercing an 8 byte integer to a 4 byte pointer.  */
                  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
                                                  * sizeof (PTRACE_XFER_TYPE)),
                  (PTRACE_TYPE_ARG4) 0);
      if (errno)
        return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
          myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
              /* Coerce to a uintptr_t first to avoid potential gcc warning
                 about coercing an 8 byte integer to a 4 byte pointer.  */
              (PTRACE_TYPE_ARG3) (uintptr_t) addr,
              (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
        return errno;
    }

  return 0;
}
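
/* Editor's note: a worked example of the address arithmetic above.
   For an unaligned write the first and last longwords are fetched
   first so the bytes outside [MEMADDR, MEMADDR + LEN) are preserved.
   Values are hypothetical (4-byte PTRACE_XFER_TYPE); kept out of the
   build with #if 0.  */
#if 0
#include <assert.h>

static void
example_write_window (void)
{
  unsigned long memaddr = 0x1003, len = 6, xfer = 4;
  unsigned long addr = memaddr & -xfer;     /* 0x1000: round down.  */
  unsigned long count = ((memaddr + len) - addr + xfer - 1) / xfer;

  assert (addr == 0x1000);
  /* Words at 0x1000, 0x1004, 0x1008 cover bytes 0x1003..0x1008.  */
  assert (count == 3);
}
#endif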

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  if (!ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      int lwpid;

      lwpid = lwpid_of (current_inferior);
      kill_lwp (lwpid, SIGINT);
    }
  else
    kill_lwp (signal_pid, SIGINT);
}

/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (current_inferior);

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
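
/* Editor's note: a self-contained sketch of what a consumer does with
   the raw bytes returned above -- the auxv file is an array of
   (a_type, a_un.a_val) pairs terminated by AT_NULL.  This walks the
   current process's own vector looking for AT_PAGESZ.  Kept out of the
   build with #if 0.  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <link.h>               /* ElfW */
#include <unistd.h>

static unsigned long
example_auxv_pagesz (void)
{
  ElfW(auxv_t) entry;
  unsigned long val = 0;
  int fd = open ("/proc/self/auxv", O_RDONLY);

  if (fd < 0)
    return 0;
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry)
         && entry.a_type != AT_NULL)
    if (entry.a_type == AT_PAGESZ)
      {
        val = entry.a_un.a_val;         /* Page size the kernel reported.  */
        break;
      }
  close (fd);
  return val;
}
#endif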
4870
d993e290
PA
4871/* These breakpoint and watchpoint related wrapper functions simply
4872 pass on the function call if the target has registered a
4873 corresponding function. */
e013ee27
OF
4874
4875static int
802e8e6d
PA
4876linux_supports_z_point_type (char z_type)
4877{
4878 return (the_low_target.supports_z_point_type != NULL
4879 && the_low_target.supports_z_point_type (z_type));
4880}
4881
4882static int
4883linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4884 int size, struct raw_breakpoint *bp)
e013ee27 4885{
d993e290 4886 if (the_low_target.insert_point != NULL)
802e8e6d 4887 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
4888 else
4889 /* Unsupported (see target.h). */
4890 return 1;
4891}
4892
4893static int
802e8e6d
PA
4894linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4895 int size, struct raw_breakpoint *bp)
e013ee27 4896{
d993e290 4897 if (the_low_target.remove_point != NULL)
802e8e6d 4898 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
4899 else
4900 /* Unsupported (see target.h). */
4901 return 1;
4902}
4903
4904static int
4905linux_stopped_by_watchpoint (void)
4906{
c3adc08c
PA
4907 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4908
4909 return lwp->stopped_by_watchpoint;
e013ee27
OF
4910}
4911
4912static CORE_ADDR
4913linux_stopped_data_address (void)
4914{
c3adc08c
PA
4915 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4916
4917 return lwp->stopped_data_address;
e013ee27
OF
4918}
4919
db0dfaa0
LM
4920#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4921 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4922 && defined(PT_TEXT_END_ADDR)
4923
4924/* This is only used for targets that define PT_TEXT_ADDR,
4925 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4926 the target has different ways of acquiring this information, like
4927 loadmaps. */
52fb6437
NS
4928
4929/* Under uClinux, programs are loaded at non-zero offsets, which we need
4930 to tell gdb about. */
4931
4932static int
4933linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4934{
52fb6437 4935 unsigned long text, text_end, data;
bd99dc85 4936 int pid = lwpid_of (get_thread_lwp (current_inferior));
52fb6437
NS
4937
4938 errno = 0;
4939
b8e1b30e
LM
4940 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4941 (PTRACE_TYPE_ARG4) 0);
4942 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4943 (PTRACE_TYPE_ARG4) 0);
4944 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4945 (PTRACE_TYPE_ARG4) 0);
52fb6437
NS
4946
4947 if (errno == 0)
4948 {
4949 /* Both text and data offsets produced at compile-time (and so
1b3f6016
PA
4950 used by gdb) are relative to the beginning of the program,
4951 with the data segment immediately following the text segment.
4952 However, the actual runtime layout in memory may put the data
4953 somewhere else, so when we send gdb a data base-address, we
4954 use the real data base address and subtract the compile-time
4955 data base-address from it (which is just the length of the
4956 text segment). BSS immediately follows data in both
4957 cases. */
52fb6437
NS
4958 *text_p = text;
4959 *data_p = data - (text_end - text);
1b3f6016 4960
52fb6437
NS
4961 return 1;
4962 }
52fb6437
NS
4963 return 0;
4964}
4965#endif
4966
07e059b5
VP
4967static int
4968linux_qxfer_osdata (const char *annex,
1b3f6016
PA
4969 unsigned char *readbuf, unsigned const char *writebuf,
4970 CORE_ADDR offset, int len)
07e059b5 4971{
d26e3629 4972 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4973}
4974
d0722149
DE
4975/* Convert a native/host siginfo object, into/from the siginfo in the
4976 layout of the inferiors' architecture. */
4977
4978static void
a5362b9a 4979siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
4980{
4981 int done = 0;
4982
4983 if (the_low_target.siginfo_fixup != NULL)
4984 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4985
4986 /* If there was no callback, or the callback didn't do anything,
4987 then just do a straight memcpy. */
4988 if (!done)
4989 {
4990 if (direction == 1)
a5362b9a 4991 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 4992 else
a5362b9a 4993 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
4994 }
4995}
4996
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (current_inferior);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
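
/* The clamping above implements standard partial-transfer semantics.
   For example (sizes made up): with sizeof (siginfo_t) == 128, a
   request at offset 120 for len 16 is clamped to len 8, and a request
   at offset 128 or beyond fails with -1.  */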

/* SIGCHLD handler that serves two purposes: in non-stop/async mode, it
   lets us notice when children change state; and it acts as the handler
   for the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}

static int
linux_supports_non_stop (void)
{
  return 1;
}

static int
linux_async (int enable)
{
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
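
/* The pipe set up above is the classic self-pipe trick: async_file_mark
   writes a byte to the write end (safe from a signal handler), making
   the read end readable and waking the event loop.  A standalone sketch
   of the pattern, independent of gdbserver's event-loop helpers: */
#if 0
static int event_pipe[2];	/* Both ends set O_NONBLOCK.  */

static void
mark_event (void)		/* async-signal-safe */
{
  char c = '+';
  (void) write (event_pipe[1], &c, 1);
}

static void
drain_events (void)		/* called from the event loop */
{
  char buf[16];
  while (read (event_pipe[0], buf, sizeof buf) > 0)
    ;				/* O_NONBLOCK: stops once empty.  */
}
#endif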

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}

static int
linux_supports_multi_process (void)
{
  return 1;
}

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}

static int
linux_supports_agent (void)
{
  return 1;
}

static int
linux_supports_range_stepping (void)
{
  if (*the_low_target.supports_range_stepping == NULL)
    return 0;

  return (*the_low_target.supports_range_stepping) ();
}

/* Enumerate spufs IDs for process PID.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
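
/* The POS/OFFSET arithmetic above windows the result: each ID occupies
   4 bytes, so with offset = 4 and len = 8 only the second and third IDs
   found are stored, and WRITTEN reports how many bytes actually landed
   in BUF.  */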

/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (current_inferior);
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_inferior);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */

static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}

static int
linux_supports_tracepoints (void)
{
  if (*the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}

static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}

static struct emit_ops *
linux_emit_ops (void)
{
  if (the_low_target.emit_ops != NULL)
    return (*the_low_target.emit_ops) ();
  else
    return NULL;
}

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}

/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}

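/* The auxv parsing above can be exercised outside gdbserver.  A minimal
   standalone reader over the current process - it assumes a 64-bit host
   for brevity and relies only on headers this file already includes: */
#if 0
static void
dump_own_phdr_auxv (void)
{
  Elf64_auxv_t aux;
  int fd = open ("/proc/self/auxv", O_RDONLY);

  if (fd < 0)
    return;
  while (read (fd, &aux, sizeof aux) == sizeof aux
	 && aux.a_type != AT_NULL)
    if (aux.a_type == AT_PHDR || aux.a_type == AT_PHNUM)
      fprintf (stderr, "a_type %ld a_val 0x%lx\n",
	       (long) aux.a_type, (unsigned long) aux.a_un.a_val);
  close (fd);
}
#endif
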
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.
	 Fortunately all real-world executables, including PIE ones,
	 always have PT_PHDR present.  PT_PHDR is missing from some
	 shared libraries and from fpc (Free Pascal 2.4) binaries, but
	 neither of those needs or provides DT_DEBUG anyway (fpc
	 binaries are statically linked).

	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.

	 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
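
/* A worked example of the relocation computed above, with made-up
   addresses: for a PIE whose PT_PHDR records p_vaddr 0x40 and which the
   kernel loaded at 0x555555554000, AT_PHDR is 0x555555554040, so
   relocation = 0x555555554040 - 0x40 = 0x555555554000, and PT_DYNAMIC's
   p_vaddr is shifted by that amount.  For a non-PIE executable the two
   values coincide and relocation is 0.  */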

/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}

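/* Whether a binary carries the DT_DEBUG (or DT_MIPS_RLD_MAP) entry that
   get_r_debug scans for can be checked from the shell, e.g.:

       readelf -d ./a.out | grep -E 'DEBUG|MIPS_RLD_MAP'  */
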
/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
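
/* Why the union above works: linux_read_memory deposits PTR_SIZE bytes
   at the start of the union, and reading them back through ADDR.UI (for
   a 4-byte inferior pointer) reinterprets exactly those bytes in the
   host's native byte order.  Copying the bytes into the low-order end
   of ADDR.CORE_ADDR directly would instead be wrong on big-endian
   hosts, where the low-order end is the last bytes of the object.  */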

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_inferior);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && strncmp (annex, "start", 5) == 0)
	addrp = &lm_addr;
      else if (len == 4 && strncmp (annex, "prev", 4) == 0)
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  This situation will not change
	 for this inferior - do not retry.  Report it to GDB as E01;
	 see solib-svr4.c on the GDB side for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has a valid name, as it
	 corresponds to the main executable.  The first entry should not
	 be skipped if the dynamic loader was loaded late by a static
	 executable (see solib-svr4.c parameter ignore_first).  But in
	 such a case the main executable does not have PT_DYNAMIC
	 present and this function already exited above due to a failed
	 get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
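
/* The reply document built above looks like this (addresses made up):

       <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
	 <library name="/lib64/libc.so.6" lm="0x7ffff7fc3700"
		  l_addr="0x7ffff7a0f000" l_ld="0x7ffff7dbae40"/>
       </library-list-svr4>

   or just `<library-list-svr4 version="1.0"/>' for an empty list.  */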

#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
		       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
#endif /* HAVE_LINUX_BTRACE */

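/* The branch-trace document produced above has this shape (addresses
   made up):

       <!DOCTYPE btrace SYSTEM "btrace.dtd">
       <btrace version="1.0">
       <block begin="0x400520" end="0x400544"/>
       <block begin="0x400610" end="0x40062f"/>
       </btrace>  */
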
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}