Implement vFile:setfs in gdbserver
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
CommitLineData
da6d8c04 1/* Low level interface to ptrace, for the remote server for GDB.
32d0add0 2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
da6d8c04
DJ
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
a9762ec7 8 the Free Software Foundation; either version 3 of the License, or
da6d8c04
DJ
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
a9762ec7 17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
da6d8c04
DJ
18
19#include "server.h"
58caa3dc 20#include "linux-low.h"
125f8a3d 21#include "nat/linux-osdata.h"
58b4daa5 22#include "agent.h"
de0d863e 23#include "tdesc.h"
da6d8c04 24
96d7229d
LM
25#include "nat/linux-nat.h"
26#include "nat/linux-waitpid.h"
8bdce1ff 27#include "gdb_wait.h"
da6d8c04 28#include <sys/ptrace.h>
125f8a3d
GB
29#include "nat/linux-ptrace.h"
30#include "nat/linux-procfs.h"
8cc73a39 31#include "nat/linux-personality.h"
da6d8c04
DJ
32#include <signal.h>
33#include <sys/ioctl.h>
34#include <fcntl.h>
0a30fbc4 35#include <unistd.h>
fd500816 36#include <sys/syscall.h>
f9387fc3 37#include <sched.h>
07e059b5
VP
38#include <ctype.h>
39#include <pwd.h>
40#include <sys/types.h>
41#include <dirent.h>
53ce3c39 42#include <sys/stat.h>
efcbbd14 43#include <sys/vfs.h>
1570b33e 44#include <sys/uio.h>
602e3198 45#include "filestuff.h"
c144c7a0 46#include "tracepoint.h"
533b0600 47#include "hostio.h"
957f3f49
DE
48#ifndef ELFMAG0
49/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
50 then ELFMAG0 will have been defined. If it didn't get included by
51 gdb_proc_service.h then including it will likely introduce a duplicate
52 definition of elf_fpregset_t. */
53#include <elf.h>
54#endif
14d2069a 55#include "nat/linux-namespaces.h"
efcbbd14
UW
56
57#ifndef SPUFS_MAGIC
58#define SPUFS_MAGIC 0x23c9b64e
59#endif
da6d8c04 60
03583c20
UW
61#ifdef HAVE_PERSONALITY
62# include <sys/personality.h>
63# if !HAVE_DECL_ADDR_NO_RANDOMIZE
64# define ADDR_NO_RANDOMIZE 0x0040000
65# endif
66#endif
67
fd462a61
DJ
68#ifndef O_LARGEFILE
69#define O_LARGEFILE 0
70#endif
71
ec8ebe72
DE
72#ifndef W_STOPCODE
73#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
74#endif
75
1a981360
PA
76/* This is the kernel's hard limit. Not to be confused with
77 SIGRTMIN. */
78#ifndef __SIGRTMIN
79#define __SIGRTMIN 32
80#endif
81
db0dfaa0
LM
82/* Some targets did not define these ptrace constants from the start,
83 so gdbserver defines them locally here. In the future, these may
84 be removed after they are added to asm/ptrace.h. */
85#if !(defined(PT_TEXT_ADDR) \
86 || defined(PT_DATA_ADDR) \
87 || defined(PT_TEXT_END_ADDR))
88#if defined(__mcoldfire__)
89/* These are still undefined in 3.10 kernels. */
90#define PT_TEXT_ADDR 49*4
91#define PT_DATA_ADDR 50*4
92#define PT_TEXT_END_ADDR 51*4
93/* BFIN already defines these since at least 2.6.32 kernels. */
94#elif defined(BFIN)
95#define PT_TEXT_ADDR 220
96#define PT_TEXT_END_ADDR 224
97#define PT_DATA_ADDR 228
98/* These are still undefined in 3.10 kernels. */
99#elif defined(__TMS320C6X__)
100#define PT_TEXT_ADDR (0x10000*4)
101#define PT_DATA_ADDR (0x10004*4)
102#define PT_TEXT_END_ADDR (0x10008*4)
103#endif
104#endif
105
9accd112 106#ifdef HAVE_LINUX_BTRACE
125f8a3d 107# include "nat/linux-btrace.h"
734b0e4b 108# include "btrace-common.h"
9accd112
MM
109#endif
110
8365dcf5
TJB
111#ifndef HAVE_ELF32_AUXV_T
112/* Copied from glibc's elf.h. */
113typedef struct
114{
115 uint32_t a_type; /* Entry type */
116 union
117 {
118 uint32_t a_val; /* Integer value */
119 /* We use to have pointer elements added here. We cannot do that,
120 though, since it does not work when using 32-bit definitions
121 on 64-bit platforms and vice versa. */
122 } a_un;
123} Elf32_auxv_t;
124#endif
125
126#ifndef HAVE_ELF64_AUXV_T
127/* Copied from glibc's elf.h. */
128typedef struct
129{
130 uint64_t a_type; /* Entry type */
131 union
132 {
133 uint64_t a_val; /* Integer value */
134 /* We use to have pointer elements added here. We cannot do that,
135 though, since it does not work when using 32-bit definitions
136 on 64-bit platforms and vice versa. */
137 } a_un;
138} Elf64_auxv_t;
139#endif
140
cff068da
GB
141/* LWP accessors. */
142
143/* See nat/linux-nat.h. */
144
145ptid_t
146ptid_of_lwp (struct lwp_info *lwp)
147{
148 return ptid_of (get_lwp_thread (lwp));
149}
150
151/* See nat/linux-nat.h. */
152
4b134ca1
GB
153void
154lwp_set_arch_private_info (struct lwp_info *lwp,
155 struct arch_lwp_info *info)
156{
157 lwp->arch_private = info;
158}
159
160/* See nat/linux-nat.h. */
161
162struct arch_lwp_info *
163lwp_arch_private_info (struct lwp_info *lwp)
164{
165 return lwp->arch_private;
166}
167
168/* See nat/linux-nat.h. */
169
cff068da
GB
170int
171lwp_is_stopped (struct lwp_info *lwp)
172{
173 return lwp->stopped;
174}
175
176/* See nat/linux-nat.h. */
177
178enum target_stop_reason
179lwp_stop_reason (struct lwp_info *lwp)
180{
181 return lwp->stop_reason;
182}
183
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of not-yet-claimed stopped children; NULL when
   empty.  Manipulated via add_to_pid_list / pull_pid_from_list.  */
struct simple_pid_list *stopped_pids;
200
201/* Trivial list manipulation functions to keep track of a list of new
202 stopped processes. */
203
204static void
205add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
206{
207 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
208
209 new_pid->pid = pid;
210 new_pid->status = status;
211 new_pid->next = *listp;
212 *listp = new_pid;
213}
214
215static int
216pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
217{
218 struct simple_pid_list **p;
219
220 for (p = listp; *p != NULL; p = &(*p)->next)
221 if ((*p)->pid == pid)
222 {
223 struct simple_pid_list *next = (*p)->next;
224
225 *statusp = (*p)->status;
226 xfree (*p);
227 *p = next;
228 return 1;
229 }
230 return 0;
231}
24a09b5f 232
/* What kind of all-stop operation (if any) is currently in
   progress.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
0d62e5e8
DJ
247
248/* FIXME make into a target method? */
24a09b5f 249int using_threads = 1;
24a09b5f 250
fa593d66
PA
251/* True if we're presently stabilizing threads (moving them out of
252 jump pads). */
253static int stabilizing_threads;
254
2acc282a 255static void linux_resume_one_lwp (struct lwp_info *lwp,
54a0b537 256 int step, int signal, siginfo_t *info);
2bd7c093 257static void linux_resume (struct thread_resume *resume_info, size_t n);
7984d532
PA
258static void stop_all_lwps (int suspend, struct lwp_info *except);
259static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
fa96cb38
PA
260static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
261 int *wstat, int options);
95954743 262static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
b3312d80 263static struct lwp_info *add_lwp (ptid_t ptid);
c35fafde 264static int linux_stopped_by_watchpoint (void);
95954743 265static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
d50171e4 266static void proceed_all_lwps (void);
d50171e4 267static int finish_step_over (struct lwp_info *lwp);
d50171e4
PA
268static int kill_lwp (unsigned long lwpid, int signo);
269
582511be
PA
270/* When the event-loop is doing a step-over, this points at the thread
271 being stepped. */
272ptid_t step_over_bkpt;
273
d50171e4
PA
274/* True if the low target can hardware single-step. Such targets
275 don't need a BREAKPOINT_REINSERT_ADDR callback. */
276
277static int
278can_hardware_single_step (void)
279{
280 return (the_low_target.breakpoint_reinsert_addr == NULL);
281}
282
283/* True if the low target supports memory breakpoints. If so, we'll
284 have a GET_PC implementation. */
285
286static int
287supports_breakpoints (void)
288{
289 return (the_low_target.get_pc != NULL);
290}
0d62e5e8 291
fa593d66
PA
292/* Returns true if this target can support fast tracepoints. This
293 does not mean that the in-process agent has been loaded in the
294 inferior. */
295
296static int
297supports_fast_tracepoints (void)
298{
299 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
300}
301
c2d6af84
PA
302/* True if LWP is stopped in its stepping range. */
303
304static int
305lwp_in_step_range (struct lwp_info *lwp)
306{
307 CORE_ADDR pc = lwp->stop_pc;
308
309 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
310}
311
/* A signal queued for later delivery to an LWP, kept as a singly
   linked stack through PREV.  */

struct pending_signals
{
  /* The signal number.  */
  int signal;
  /* The associated siginfo.  NOTE(review): presumably captured with
     PTRACE_GETSIGINFO at enqueue time -- confirm against callers.  */
  siginfo_t info;
  /* Previously queued signal, or NULL if this is the oldest.  */
  struct pending_signals *prev;
};
611cb4a5 318
bd99dc85
PA
319/* The read/write ends of the pipe registered as waitable file in the
320 event loop. */
321static int linux_event_pipe[2] = { -1, -1 };
322
323/* True if we're currently in async mode. */
324#define target_is_async_p() (linux_event_pipe[0] != -1)
325
02fc4de7 326static void send_sigstop (struct lwp_info *lwp);
fa96cb38 327static void wait_for_sigstop (void);
bd99dc85 328
/* Examine HEADER, which is expected to hold an ELF file header.
   Store the ELF machine number (e_machine) in *MACHINE.  Return 1 if
   the header describes a 64-bit ELF file, 0 if it describes a 32-bit
   ELF file, and -1 if the magic bytes do not identify an ELF file at
   all (in which case *MACHINE is set to EM_NONE).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  const unsigned char *ident = header->e_ident;

  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    {
      /* Not an ELF file at all.  */
      *machine = EM_NONE;
      return -1;
    }

  *machine = header->e_machine;
  return ident[EI_CLASS] == ELFCLASS64;
}
346
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.
   On success, the ELF machine number is stored in *MACHINE.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd = open (file, O_RDONLY);

  if (fd < 0)
    return -1;

  /* A short read (or read error) means FILE cannot be a valid
     64-bit ELF file.  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
370
be07f1a2
PA
371/* Accepts an integer PID; Returns true if the executable PID is
372 running is a 64-bit ELF file.. */
373
374int
214d508e 375linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
be07f1a2 376{
d8d2a3ee 377 char file[PATH_MAX];
be07f1a2
PA
378
379 sprintf (file, "/proc/%d/exe", pid);
214d508e 380 return elf_64_file_p (file, machine);
be07f1a2
PA
381}
382
bd99dc85
PA
383static void
384delete_lwp (struct lwp_info *lwp)
385{
fa96cb38
PA
386 struct thread_info *thr = get_lwp_thread (lwp);
387
388 if (debug_threads)
389 debug_printf ("deleting %ld\n", lwpid_of (thr));
390
391 remove_thread (thr);
aa5ca48f 392 free (lwp->arch_private);
bd99dc85
PA
393 free (lwp);
394}
395
95954743
PA
396/* Add a process to the common process list, and set its private
397 data. */
398
399static struct process_info *
400linux_add_process (int pid, int attached)
401{
402 struct process_info *proc;
403
95954743 404 proc = add_process (pid, attached);
fe978cb0 405 proc->priv = xcalloc (1, sizeof (*proc->priv));
95954743 406
3aee8918 407 /* Set the arch when the first LWP stops. */
fe978cb0 408 proc->priv->new_inferior = 1;
3aee8918 409
aa5ca48f 410 if (the_low_target.new_process != NULL)
fe978cb0 411 proc->priv->arch_private = the_low_target.new_process ();
aa5ca48f 412
95954743
PA
413 return proc;
414}
415
static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response for EVENT_LWP, whose raw
   waitpid status is WSTAT.  Returns 0 if the event should be reported
   to the higher layers (fork/vfork/vfork-done), or 1 if it was
   handled internally and must NOT be reported (clone: the new LWP is
   simply added to our list).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  /* The child gets its own copy of the target description,
	     since descriptions are mutated/freed per-process.  */
	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      /* A clone stays within the parent's process; the LWP id is the
	 new thread's id.  */
      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
562
d50171e4
PA
563/* Return the PC as read from the regcache of LWP, without any
564 adjustment. */
565
566static CORE_ADDR
567get_pc (struct lwp_info *lwp)
568{
0bfdf32f 569 struct thread_info *saved_thread;
d50171e4
PA
570 struct regcache *regcache;
571 CORE_ADDR pc;
572
573 if (the_low_target.get_pc == NULL)
574 return 0;
575
0bfdf32f
GB
576 saved_thread = current_thread;
577 current_thread = get_lwp_thread (lwp);
d50171e4 578
0bfdf32f 579 regcache = get_thread_regcache (current_thread, 1);
d50171e4
PA
580 pc = (*the_low_target.get_pc) (regcache);
581
582 if (debug_threads)
87ce2a04 583 debug_printf ("pc is 0x%lx\n", (long) pc);
d50171e4 584
0bfdf32f 585 current_thread = saved_thread;
d50171e4
PA
586 return pc;
587}
588
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.

   Returns 1 if the stop was caused by a (software or hardware)
   breakpoint, in which case LWP's stop_pc and stop_reason are
   updated; 0 otherwise.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  /* Where the PC would be if a software breakpoint just trapped,
     after undoing the architecture's post-break PC advance.  */
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  /* Prefer the kernel's own report (si_code) of what caused the
     SIGTRAP, when available.  */
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Hardware breakpoints don't advance the PC; report it
		 unadjusted.  */
	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* Plain single-step trap; fall through and return 0.  */
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* No siginfo support: fall back to inspecting what is inserted at
     the adjusted PC.  */

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
ce3a066d 740
b3312d80 741static struct lwp_info *
95954743 742add_lwp (ptid_t ptid)
611cb4a5 743{
54a0b537 744 struct lwp_info *lwp;
0d62e5e8 745
54a0b537
PA
746 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
747 memset (lwp, 0, sizeof (*lwp));
0d62e5e8 748
aa5ca48f 749 if (the_low_target.new_thread != NULL)
34c703da 750 the_low_target.new_thread (lwp);
aa5ca48f 751
f7667f0d 752 lwp->thread = add_thread (ptid, lwp);
0d62e5e8 753
54a0b537 754 return lwp;
0d62e5e8 755}
611cb4a5 756
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args. */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  /* Optionally disable ASLR for the child; undone (in the parent)
     after the fork via do_cleanups below.  */
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* No-MMU systems cannot fork; vfork shares the address space until
     exec.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: set up tracing and exec the program.  */
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      /* Try PROGRAM as given; on ENOENT retry with a PATH search.  */
      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  /* Parent.  Restore the personality (ASLR) setting.  */
  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  /* NOTE(review): presumably arranges for ptrace options to be set
     once the child's first stop is seen -- confirm against the wait
     path.  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
823
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  /* On failure, report the errno so callers can distinguish
     e.g. ESRCH (thread gone) from EPERM.  */
  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
908
8784d563
PA
909/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
910 already attached. Returns true if a new LWP is found, false
911 otherwise. */
912
913static int
914attach_proc_task_lwp_callback (ptid_t ptid)
915{
916 /* Is this a new thread? */
917 if (find_thread_ptid (ptid) == NULL)
918 {
919 int lwpid = ptid_get_lwp (ptid);
920 int err;
921
922 if (debug_threads)
923 debug_printf ("Found new lwp %d\n", lwpid);
924
925 err = linux_attach_lwp (ptid);
926
927 /* Be quiet if we simply raced with the thread exiting. EPERM
928 is returned if the thread's task still exists, and is marked
929 as exited or zombie, as well as other conditions, so in that
930 case, confirm the status in /proc/PID/status. */
931 if (err == ESRCH
932 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
933 {
934 if (debug_threads)
935 {
936 debug_printf ("Cannot attach to lwp %d: "
937 "thread is gone (%d: %s)\n",
938 lwpid, err, strerror (err));
939 }
940 }
941 else if (err != 0)
942 {
943 warning (_("Cannot attach to lwp %d: %s"),
944 lwpid,
945 linux_ptrace_attach_fail_reason_string (ptid, err));
946 }
947
948 return 1;
949 }
950 return 0;
951}
952
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  Returns 0; a failure to attach to PID itself
   raises an error instead.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}
993
994struct counter
995{
996 int pid;
997 int count;
998};
999
1000static int
1001second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1002{
1003 struct counter *counter = args;
1004
1005 if (ptid_get_pid (entry->id) == counter->pid)
1006 {
1007 if (++counter->count > 1)
1008 return 1;
1009 }
d61ddec4 1010
da6d8c04
DJ
1011 return 0;
1012}
1013
95954743 1014static int
fa96cb38 1015last_thread_of_process_p (int pid)
95954743 1016{
95954743 1017 struct counter counter = { pid , 0 };
da6d8c04 1018
95954743
PA
1019 return (find_inferior (&all_threads,
1020 second_thread_of_pid_p, &counter) == NULL);
1021}
1022
da84f473
PA
1023/* Kill LWP. */
1024
1025static void
1026linux_kill_one_lwp (struct lwp_info *lwp)
1027{
d86d4aaf
DE
1028 struct thread_info *thr = get_lwp_thread (lwp);
1029 int pid = lwpid_of (thr);
da84f473
PA
1030
1031 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1032 there is no signal context, and ptrace(PTRACE_KILL) (or
1033 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1034 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1035 alternative is to kill with SIGKILL. We only need one SIGKILL
1036 per process, not one for each thread. But since we still support
1037 linuxthreads, and we also support debugging programs using raw
1038 clone without CLONE_THREAD, we send one for each thread. For
1039 years, we used PTRACE_KILL only, so we're being a bit paranoid
1040 about some old kernels where PTRACE_KILL might work better
1041 (dubious if there are any such, but that's why it's paranoia), so
1042 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1043 everywhere. */
1044
1045 errno = 0;
69ff6be5 1046 kill_lwp (pid, SIGKILL);
da84f473 1047 if (debug_threads)
ce9e3fe7
PA
1048 {
1049 int save_errno = errno;
1050
1051 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1052 target_pid_to_str (ptid_of (thr)),
1053 save_errno ? strerror (save_errno) : "OK");
1054 }
da84f473
PA
1055
1056 errno = 0;
b8e1b30e 1057 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
da84f473 1058 if (debug_threads)
ce9e3fe7
PA
1059 {
1060 int save_errno = errno;
1061
1062 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1063 target_pid_to_str (ptid_of (thr)),
1064 save_errno ? strerror (save_errno) : "OK");
1065 }
da84f473
PA
1066}
1067
e76126e8
PA
1068/* Kill LWP and wait for it to die. */
1069
1070static void
1071kill_wait_lwp (struct lwp_info *lwp)
1072{
1073 struct thread_info *thr = get_lwp_thread (lwp);
1074 int pid = ptid_get_pid (ptid_of (thr));
1075 int lwpid = ptid_get_lwp (ptid_of (thr));
1076 int wstat;
1077 int res;
1078
1079 if (debug_threads)
1080 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1081
1082 do
1083 {
1084 linux_kill_one_lwp (lwp);
1085
1086 /* Make sure it died. Notes:
1087
1088 - The loop is most likely unnecessary.
1089
1090 - We don't use linux_wait_for_event as that could delete lwps
1091 while we're iterating over them. We're not interested in
1092 any pending status at this point, only in making sure all
1093 wait status on the kernel side are collected until the
1094 process is reaped.
1095
1096 - We don't use __WALL here as the __WALL emulation relies on
1097 SIGCHLD, and killing a stopped process doesn't generate
1098 one, nor an exit status.
1099 */
1100 res = my_waitpid (lwpid, &wstat, 0);
1101 if (res == -1 && errno == ECHILD)
1102 res = my_waitpid (lwpid, &wstat, __WCLONE);
1103 } while (res > 0 && WIFSTOPPED (wstat));
1104
1105 gdb_assert (res > 0);
1106}
1107
da84f473
PA
1108/* Callback for `find_inferior'. Kills an lwp of a given process,
1109 except the leader. */
95954743
PA
1110
1111static int
da84f473 1112kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
da6d8c04 1113{
0d62e5e8 1114 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1115 struct lwp_info *lwp = get_thread_lwp (thread);
95954743
PA
1116 int pid = * (int *) args;
1117
1118 if (ptid_get_pid (entry->id) != pid)
1119 return 0;
0d62e5e8 1120
fd500816
DJ
1121 /* We avoid killing the first thread here, because of a Linux kernel (at
1122 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1123 the children get a chance to be reaped, it will remain a zombie
1124 forever. */
95954743 1125
d86d4aaf 1126 if (lwpid_of (thread) == pid)
95954743
PA
1127 {
1128 if (debug_threads)
87ce2a04
DE
1129 debug_printf ("lkop: is last of process %s\n",
1130 target_pid_to_str (entry->id));
95954743
PA
1131 return 0;
1132 }
fd500816 1133
e76126e8 1134 kill_wait_lwp (lwp);
95954743 1135 return 0;
da6d8c04
DJ
1136}
1137
95954743
PA
1138static int
1139linux_kill (int pid)
0d62e5e8 1140{
95954743 1141 struct process_info *process;
54a0b537 1142 struct lwp_info *lwp;
fd500816 1143
95954743
PA
1144 process = find_process_pid (pid);
1145 if (process == NULL)
1146 return -1;
9d606399 1147
f9e39928
PA
1148 /* If we're killing a running inferior, make sure it is stopped
1149 first, as PTRACE_KILL will not work otherwise. */
7984d532 1150 stop_all_lwps (0, NULL);
f9e39928 1151
da84f473 1152 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
fd500816 1153
54a0b537 1154 /* See the comment in linux_kill_one_lwp. We did not kill the first
fd500816 1155 thread in the list, so do so now. */
95954743 1156 lwp = find_lwp_pid (pid_to_ptid (pid));
bd99dc85 1157
784867a5 1158 if (lwp == NULL)
fd500816 1159 {
784867a5 1160 if (debug_threads)
d86d4aaf
DE
1161 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1162 pid);
784867a5
JK
1163 }
1164 else
e76126e8 1165 kill_wait_lwp (lwp);
2d717e4f 1166
8336d594 1167 the_target->mourn (process);
f9e39928
PA
1168
1169 /* Since we presently can only stop all lwps of all processes, we
1170 need to unstop lwps of other processes. */
7984d532 1171 unstop_all_lwps (0, NULL);
95954743 1172 return 0;
0d62e5e8
DJ
1173}
1174
9b224c5e
PA
1175/* Get pending signal of THREAD, for detaching purposes. This is the
1176 signal the thread last stopped for, which we need to deliver to the
1177 thread when detaching, otherwise, it'd be suppressed/lost. */
1178
1179static int
1180get_detach_signal (struct thread_info *thread)
1181{
a493e3e2 1182 enum gdb_signal signo = GDB_SIGNAL_0;
9b224c5e
PA
1183 int status;
1184 struct lwp_info *lp = get_thread_lwp (thread);
1185
1186 if (lp->status_pending_p)
1187 status = lp->status_pending;
1188 else
1189 {
1190 /* If the thread had been suspended by gdbserver, and it stopped
1191 cleanly, then it'll have stopped with SIGSTOP. But we don't
1192 want to deliver that SIGSTOP. */
1193 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
a493e3e2 1194 || thread->last_status.value.sig == GDB_SIGNAL_0)
9b224c5e
PA
1195 return 0;
1196
1197 /* Otherwise, we may need to deliver the signal we
1198 intercepted. */
1199 status = lp->last_status;
1200 }
1201
1202 if (!WIFSTOPPED (status))
1203 {
1204 if (debug_threads)
87ce2a04 1205 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
d86d4aaf 1206 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1207 return 0;
1208 }
1209
1210 /* Extended wait statuses aren't real SIGTRAPs. */
89a5711c 1211 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
9b224c5e
PA
1212 {
1213 if (debug_threads)
87ce2a04
DE
1214 debug_printf ("GPS: lwp %s had stopped with extended "
1215 "status: no pending signal\n",
d86d4aaf 1216 target_pid_to_str (ptid_of (thread)));
9b224c5e
PA
1217 return 0;
1218 }
1219
2ea28649 1220 signo = gdb_signal_from_host (WSTOPSIG (status));
9b224c5e
PA
1221
1222 if (program_signals_p && !program_signals[signo])
1223 {
1224 if (debug_threads)
87ce2a04 1225 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
d86d4aaf 1226 target_pid_to_str (ptid_of (thread)),
87ce2a04 1227 gdb_signal_to_string (signo));
9b224c5e
PA
1228 return 0;
1229 }
1230 else if (!program_signals_p
1231 /* If we have no way to know which signals GDB does not
1232 want to have passed to the program, assume
1233 SIGTRAP/SIGINT, which is GDB's default. */
a493e3e2 1234 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
9b224c5e
PA
1235 {
1236 if (debug_threads)
87ce2a04
DE
1237 debug_printf ("GPS: lwp %s had signal %s, "
1238 "but we don't know if we should pass it. "
1239 "Default to not.\n",
d86d4aaf 1240 target_pid_to_str (ptid_of (thread)),
87ce2a04 1241 gdb_signal_to_string (signo));
9b224c5e
PA
1242 return 0;
1243 }
1244 else
1245 {
1246 if (debug_threads)
87ce2a04 1247 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
d86d4aaf 1248 target_pid_to_str (ptid_of (thread)),
87ce2a04 1249 gdb_signal_to_string (signo));
9b224c5e
PA
1250
1251 return WSTOPSIG (status);
1252 }
1253}
1254
95954743
PA
1255static int
1256linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
6ad8ae5c
DJ
1257{
1258 struct thread_info *thread = (struct thread_info *) entry;
54a0b537 1259 struct lwp_info *lwp = get_thread_lwp (thread);
95954743 1260 int pid = * (int *) args;
9b224c5e 1261 int sig;
95954743
PA
1262
1263 if (ptid_get_pid (entry->id) != pid)
1264 return 0;
6ad8ae5c 1265
9b224c5e 1266 /* If there is a pending SIGSTOP, get rid of it. */
54a0b537 1267 if (lwp->stop_expected)
ae13219e 1268 {
9b224c5e 1269 if (debug_threads)
87ce2a04 1270 debug_printf ("Sending SIGCONT to %s\n",
d86d4aaf 1271 target_pid_to_str (ptid_of (thread)));
9b224c5e 1272
d86d4aaf 1273 kill_lwp (lwpid_of (thread), SIGCONT);
54a0b537 1274 lwp->stop_expected = 0;
ae13219e
DJ
1275 }
1276
1277 /* Flush any pending changes to the process's registers. */
d86d4aaf 1278 regcache_invalidate_thread (thread);
ae13219e 1279
9b224c5e
PA
1280 /* Pass on any pending signal for this thread. */
1281 sig = get_detach_signal (thread);
1282
ae13219e 1283 /* Finally, let it resume. */
82bfbe7e
PA
1284 if (the_low_target.prepare_to_resume != NULL)
1285 the_low_target.prepare_to_resume (lwp);
d86d4aaf 1286 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
b8e1b30e 1287 (PTRACE_TYPE_ARG4) (long) sig) < 0)
9b224c5e 1288 error (_("Can't detach %s: %s"),
d86d4aaf 1289 target_pid_to_str (ptid_of (thread)),
9b224c5e 1290 strerror (errno));
bd99dc85
PA
1291
1292 delete_lwp (lwp);
95954743 1293 return 0;
6ad8ae5c
DJ
1294}
1295
95954743
PA
1296static int
1297linux_detach (int pid)
1298{
1299 struct process_info *process;
1300
1301 process = find_process_pid (pid);
1302 if (process == NULL)
1303 return -1;
1304
f9e39928
PA
1305 /* Stop all threads before detaching. First, ptrace requires that
1306 the thread is stopped to sucessfully detach. Second, thread_db
1307 may need to uninstall thread event breakpoints from memory, which
1308 only works with a stopped process anyway. */
7984d532 1309 stop_all_lwps (0, NULL);
f9e39928 1310
ca5c370d 1311#ifdef USE_THREAD_DB
8336d594 1312 thread_db_detach (process);
ca5c370d
PA
1313#endif
1314
fa593d66
PA
1315 /* Stabilize threads (move out of jump pads). */
1316 stabilize_threads ();
1317
95954743 1318 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
8336d594
PA
1319
1320 the_target->mourn (process);
f9e39928
PA
1321
1322 /* Since we presently can only stop all lwps of all processes, we
1323 need to unstop lwps of other processes. */
7984d532 1324 unstop_all_lwps (0, NULL);
f9e39928
PA
1325 return 0;
1326}
1327
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  /* Delete only LWPs owned by PROC; always continue iterating.  */
  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
1342
8336d594
PA
1343static void
1344linux_mourn (struct process_info *process)
1345{
1346 struct process_info_private *priv;
1347
1348#ifdef USE_THREAD_DB
1349 thread_db_mourn (process);
1350#endif
1351
d86d4aaf 1352 find_inferior (&all_threads, delete_lwp_callback, process);
f9e39928 1353
8336d594 1354 /* Freeing all private data. */
fe978cb0 1355 priv = process->priv;
8336d594
PA
1356 free (priv->arch_private);
1357 free (priv);
fe978cb0 1358 process->priv = NULL;
505106cd
PA
1359
1360 remove_process (process);
8336d594
PA
1361}
1362
/* Wait for process PID to be reaped, swallowing intermediate stops.
   The loop ends once waitpid reports the process exited or was killed
   by a signal, or once waitpid fails with ECHILD (nothing left to
   reap).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when waitpid actually filled it in; on
       failure (ret == -1) STATUS is uninitialized and reading it
       through WIFEXITED/WIFSIGNALED is undefined behavior.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1374
6ad8ae5c 1375/* Return nonzero if the given thread is still alive. */
0d62e5e8 1376static int
95954743 1377linux_thread_alive (ptid_t ptid)
0d62e5e8 1378{
95954743
PA
1379 struct lwp_info *lwp = find_lwp_pid (ptid);
1380
1381 /* We assume we always know if a thread exits. If a whole process
1382 exited but we still haven't been able to report it to GDB, we'll
1383 hold on to the last lwp of the dead process. */
1384 if (lwp != NULL)
1385 return !lwp->dead;
0d62e5e8
DJ
1386 else
1387 return 0;
1388}
1389
582511be
PA
1390/* Return 1 if this lwp still has an interesting status pending. If
1391 not (e.g., it had stopped for a breakpoint that is gone), return
1392 false. */
1393
1394static int
1395thread_still_has_status_pending_p (struct thread_info *thread)
1396{
1397 struct lwp_info *lp = get_thread_lwp (thread);
1398
1399 if (!lp->status_pending_p)
1400 return 0;
1401
1402 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1403 report any status pending the LWP may have. */
1404 if (thread->last_resume_kind == resume_stop
1405 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1406 return 0;
1407
1408 if (thread->last_resume_kind != resume_stop
15c66dd6
PA
1409 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1410 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
582511be
PA
1411 {
1412 struct thread_info *saved_thread;
1413 CORE_ADDR pc;
1414 int discard = 0;
1415
1416 gdb_assert (lp->last_status != 0);
1417
1418 pc = get_pc (lp);
1419
1420 saved_thread = current_thread;
1421 current_thread = thread;
1422
1423 if (pc != lp->stop_pc)
1424 {
1425 if (debug_threads)
1426 debug_printf ("PC of %ld changed\n",
1427 lwpid_of (thread));
1428 discard = 1;
1429 }
3e572f71
PA
1430
1431#if !USE_SIGTRAP_SIGINFO
15c66dd6 1432 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
582511be
PA
1433 && !(*the_low_target.breakpoint_at) (pc))
1434 {
1435 if (debug_threads)
1436 debug_printf ("previous SW breakpoint of %ld gone\n",
1437 lwpid_of (thread));
1438 discard = 1;
1439 }
15c66dd6 1440 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
582511be
PA
1441 && !hardware_breakpoint_inserted_here (pc))
1442 {
1443 if (debug_threads)
1444 debug_printf ("previous HW breakpoint of %ld gone\n",
1445 lwpid_of (thread));
1446 discard = 1;
1447 }
3e572f71 1448#endif
582511be
PA
1449
1450 current_thread = saved_thread;
1451
1452 if (discard)
1453 {
1454 if (debug_threads)
1455 debug_printf ("discarding pending breakpoint status\n");
1456 lp->status_pending_p = 0;
1457 return 0;
1458 }
1459 }
1460
1461 return 1;
1462}
1463
6bf5e0ba 1464/* Return 1 if this lwp has an interesting status pending. */
611cb4a5 1465static int
d50171e4 1466status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
0d62e5e8 1467{
d86d4aaf 1468 struct thread_info *thread = (struct thread_info *) entry;
582511be 1469 struct lwp_info *lp = get_thread_lwp (thread);
95954743
PA
1470 ptid_t ptid = * (ptid_t *) arg;
1471
1472 /* Check if we're only interested in events from a specific process
afa8d396
PA
1473 or a specific LWP. */
1474 if (!ptid_match (ptid_of (thread), ptid))
95954743 1475 return 0;
0d62e5e8 1476
582511be
PA
1477 if (lp->status_pending_p
1478 && !thread_still_has_status_pending_p (thread))
1479 {
1480 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1481 return 0;
1482 }
0d62e5e8 1483
582511be 1484 return lp->status_pending_p;
0d62e5e8
DJ
1485}
1486
95954743
PA
1487static int
1488same_lwp (struct inferior_list_entry *entry, void *data)
1489{
1490 ptid_t ptid = *(ptid_t *) data;
1491 int lwp;
1492
1493 if (ptid_get_lwp (ptid) != 0)
1494 lwp = ptid_get_lwp (ptid);
1495 else
1496 lwp = ptid_get_pid (ptid);
1497
1498 if (ptid_get_lwp (entry->id) == lwp)
1499 return 1;
1500
1501 return 0;
1502}
1503
1504struct lwp_info *
1505find_lwp_pid (ptid_t ptid)
1506{
d86d4aaf
DE
1507 struct inferior_list_entry *thread
1508 = find_inferior (&all_threads, same_lwp, &ptid);
1509
1510 if (thread == NULL)
1511 return NULL;
1512
1513 return get_thread_lwp ((struct thread_info *) thread);
95954743
PA
1514}
1515
fa96cb38 1516/* Return the number of known LWPs in the tgid given by PID. */
0d62e5e8 1517
fa96cb38
PA
1518static int
1519num_lwps (int pid)
1520{
1521 struct inferior_list_entry *inf, *tmp;
1522 int count = 0;
0d62e5e8 1523
fa96cb38 1524 ALL_INFERIORS (&all_threads, inf, tmp)
24a09b5f 1525 {
fa96cb38
PA
1526 if (ptid_get_pid (inf->id) == pid)
1527 count++;
24a09b5f 1528 }
3aee8918 1529
fa96cb38
PA
1530 return count;
1531}
d61ddec4 1532
6d4ee8c6
GB
1533/* The arguments passed to iterate_over_lwps. */
1534
1535struct iterate_over_lwps_args
1536{
1537 /* The FILTER argument passed to iterate_over_lwps. */
1538 ptid_t filter;
1539
1540 /* The CALLBACK argument passed to iterate_over_lwps. */
1541 iterate_over_lwps_ftype *callback;
1542
1543 /* The DATA argument passed to iterate_over_lwps. */
1544 void *data;
1545};
1546
1547/* Callback for find_inferior used by iterate_over_lwps to filter
1548 calls to the callback supplied to that function. Returning a
1549 nonzero value causes find_inferiors to stop iterating and return
1550 the current inferior_list_entry. Returning zero indicates that
1551 find_inferiors should continue iterating. */
1552
1553static int
1554iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1555{
1556 struct iterate_over_lwps_args *args
1557 = (struct iterate_over_lwps_args *) args_p;
1558
1559 if (ptid_match (entry->id, args->filter))
1560 {
1561 struct thread_info *thr = (struct thread_info *) entry;
1562 struct lwp_info *lwp = get_thread_lwp (thr);
1563
1564 return (*args->callback) (lwp, args->data);
1565 }
1566
1567 return 0;
1568}
1569
1570/* See nat/linux-nat.h. */
1571
1572struct lwp_info *
1573iterate_over_lwps (ptid_t filter,
1574 iterate_over_lwps_ftype callback,
1575 void *data)
1576{
1577 struct iterate_over_lwps_args args = {filter, callback, data};
1578 struct inferior_list_entry *entry;
1579
1580 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1581 if (entry == NULL)
1582 return NULL;
1583
1584 return get_thread_lwp ((struct thread_info *) entry);
1585}
1586
fa96cb38
PA
1587/* Detect zombie thread group leaders, and "exit" them. We can't reap
1588 their exits until all other threads in the group have exited. */
c3adc08c 1589
fa96cb38
PA
1590static void
1591check_zombie_leaders (void)
1592{
1593 struct process_info *proc, *tmp;
c3adc08c 1594
fa96cb38 1595 ALL_PROCESSES (proc, tmp)
c3adc08c 1596 {
fa96cb38
PA
1597 pid_t leader_pid = pid_of (proc);
1598 struct lwp_info *leader_lp;
c3adc08c 1599
fa96cb38 1600 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
c3adc08c 1601
fa96cb38
PA
1602 if (debug_threads)
1603 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1604 "num_lwps=%d, zombie=%d\n",
1605 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1606 linux_proc_pid_is_zombie (leader_pid));
1607
1608 if (leader_lp != NULL
1609 /* Check if there are other threads in the group, as we may
1610 have raced with the inferior simply exiting. */
1611 && !last_thread_of_process_p (leader_pid)
1612 && linux_proc_pid_is_zombie (leader_pid))
1613 {
1614 /* A leader zombie can mean one of two things:
1615
1616 - It exited, and there's an exit status pending
1617 available, or only the leader exited (not the whole
1618 program). In the latter case, we can't waitpid the
1619 leader's exit status until all other threads are gone.
1620
1621 - There are 3 or more threads in the group, and a thread
1622 other than the leader exec'd. On an exec, the Linux
1623 kernel destroys all other threads (except the execing
1624 one) in the thread group, and resets the execing thread's
1625 tid to the tgid. No exit notification is sent for the
1626 execing thread -- from the ptracer's perspective, it
1627 appears as though the execing thread just vanishes.
1628 Until we reap all other threads except the leader and the
1629 execing thread, the leader will be zombie, and the
1630 execing thread will be in `D (disc sleep)'. As soon as
1631 all other threads are reaped, the execing thread changes
1632 it's tid to the tgid, and the previous (zombie) leader
1633 vanishes, giving place to the "new" leader. We could try
1634 distinguishing the exit and exec cases, by waiting once
1635 more, and seeing if something comes out, but it doesn't
1636 sound useful. The previous leader _does_ go away, and
1637 we'll re-add the new one once we see the exec event
1638 (which is just the same as what would happen if the
1639 previous leader did exit voluntarily before some other
1640 thread execs). */
c3adc08c 1641
fa96cb38
PA
1642 if (debug_threads)
1643 fprintf (stderr,
1644 "CZL: Thread group leader %d zombie "
1645 "(it exited, or another thread execd).\n",
1646 leader_pid);
c3adc08c 1647
fa96cb38 1648 delete_lwp (leader_lp);
c3adc08c
PA
1649 }
1650 }
fa96cb38 1651}
c3adc08c 1652
fa96cb38
PA
1653/* Callback for `find_inferior'. Returns the first LWP that is not
1654 stopped. ARG is a PTID filter. */
d50171e4 1655
fa96cb38
PA
1656static int
1657not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1658{
1659 struct thread_info *thr = (struct thread_info *) entry;
1660 struct lwp_info *lwp;
1661 ptid_t filter = *(ptid_t *) arg;
47c0c975 1662
fa96cb38
PA
1663 if (!ptid_match (ptid_of (thr), filter))
1664 return 0;
bd99dc85 1665
fa96cb38
PA
1666 lwp = get_thread_lwp (thr);
1667 if (!lwp->stopped)
1668 return 1;
1669
1670 return 0;
0d62e5e8 1671}
611cb4a5 1672
219f2f23
PA
1673/* This function should only be called if the LWP got a SIGTRAP.
1674
1675 Handle any tracepoint steps or hits. Return true if a tracepoint
1676 event was handled, 0 otherwise. */
1677
1678static int
1679handle_tracepoints (struct lwp_info *lwp)
1680{
1681 struct thread_info *tinfo = get_lwp_thread (lwp);
1682 int tpoint_related_event = 0;
1683
582511be
PA
1684 gdb_assert (lwp->suspended == 0);
1685
7984d532
PA
1686 /* If this tracepoint hit causes a tracing stop, we'll immediately
1687 uninsert tracepoints. To do this, we temporarily pause all
1688 threads, unpatch away, and then unpause threads. We need to make
1689 sure the unpausing doesn't resume LWP too. */
1690 lwp->suspended++;
1691
219f2f23
PA
1692 /* And we need to be sure that any all-threads-stopping doesn't try
1693 to move threads out of the jump pads, as it could deadlock the
1694 inferior (LWP could be in the jump pad, maybe even holding the
1695 lock.) */
1696
1697 /* Do any necessary step collect actions. */
1698 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1699
fa593d66
PA
1700 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1701
219f2f23
PA
1702 /* See if we just hit a tracepoint and do its main collect
1703 actions. */
1704 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1705
7984d532
PA
1706 lwp->suspended--;
1707
1708 gdb_assert (lwp->suspended == 0);
fa593d66 1709 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
7984d532 1710
219f2f23
PA
1711 if (tpoint_related_event)
1712 {
1713 if (debug_threads)
87ce2a04 1714 debug_printf ("got a tracepoint event\n");
219f2f23
PA
1715 return 1;
1716 }
1717
1718 return 0;
1719}
1720
fa593d66
PA
1721/* Convenience wrapper. Returns true if LWP is presently collecting a
1722 fast tracepoint. */
1723
1724static int
1725linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1726 struct fast_tpoint_collect_status *status)
1727{
1728 CORE_ADDR thread_area;
d86d4aaf 1729 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1730
1731 if (the_low_target.get_thread_area == NULL)
1732 return 0;
1733
1734 /* Get the thread area address. This is used to recognize which
1735 thread is which when tracing with the in-process agent library.
1736 We don't read anything from the address, and treat it as opaque;
1737 it's the address itself that we assume is unique per-thread. */
d86d4aaf 1738 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
fa593d66
PA
1739 return 0;
1740
1741 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1742}
1743
1744/* The reason we resume in the caller, is because we want to be able
1745 to pass lwp->status_pending as WSTAT, and we need to clear
1746 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1747 refuses to resume. */
1748
1749static int
1750maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1751{
0bfdf32f 1752 struct thread_info *saved_thread;
fa593d66 1753
0bfdf32f
GB
1754 saved_thread = current_thread;
1755 current_thread = get_lwp_thread (lwp);
fa593d66
PA
1756
1757 if ((wstat == NULL
1758 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1759 && supports_fast_tracepoints ()
58b4daa5 1760 && agent_loaded_p ())
fa593d66
PA
1761 {
1762 struct fast_tpoint_collect_status status;
1763 int r;
1764
1765 if (debug_threads)
87ce2a04
DE
1766 debug_printf ("Checking whether LWP %ld needs to move out of the "
1767 "jump pad.\n",
0bfdf32f 1768 lwpid_of (current_thread));
fa593d66
PA
1769
1770 r = linux_fast_tracepoint_collecting (lwp, &status);
1771
1772 if (wstat == NULL
1773 || (WSTOPSIG (*wstat) != SIGILL
1774 && WSTOPSIG (*wstat) != SIGFPE
1775 && WSTOPSIG (*wstat) != SIGSEGV
1776 && WSTOPSIG (*wstat) != SIGBUS))
1777 {
1778 lwp->collecting_fast_tracepoint = r;
1779
1780 if (r != 0)
1781 {
1782 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1783 {
1784 /* Haven't executed the original instruction yet.
1785 Set breakpoint there, and wait till it's hit,
1786 then single-step until exiting the jump pad. */
1787 lwp->exit_jump_pad_bkpt
1788 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1789 }
1790
1791 if (debug_threads)
87ce2a04
DE
1792 debug_printf ("Checking whether LWP %ld needs to move out of "
1793 "the jump pad...it does\n",
0bfdf32f
GB
1794 lwpid_of (current_thread));
1795 current_thread = saved_thread;
fa593d66
PA
1796
1797 return 1;
1798 }
1799 }
1800 else
1801 {
1802 /* If we get a synchronous signal while collecting, *and*
1803 while executing the (relocated) original instruction,
1804 reset the PC to point at the tpoint address, before
1805 reporting to GDB. Otherwise, it's an IPA lib bug: just
1806 report the signal to GDB, and pray for the best. */
1807
1808 lwp->collecting_fast_tracepoint = 0;
1809
1810 if (r != 0
1811 && (status.adjusted_insn_addr <= lwp->stop_pc
1812 && lwp->stop_pc < status.adjusted_insn_addr_end))
1813 {
1814 siginfo_t info;
1815 struct regcache *regcache;
1816
1817 /* The si_addr on a few signals references the address
1818 of the faulting instruction. Adjust that as
1819 well. */
1820 if ((WSTOPSIG (*wstat) == SIGILL
1821 || WSTOPSIG (*wstat) == SIGFPE
1822 || WSTOPSIG (*wstat) == SIGBUS
1823 || WSTOPSIG (*wstat) == SIGSEGV)
0bfdf32f 1824 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1825 (PTRACE_TYPE_ARG3) 0, &info) == 0
fa593d66
PA
1826 /* Final check just to make sure we don't clobber
1827 the siginfo of non-kernel-sent signals. */
1828 && (uintptr_t) info.si_addr == lwp->stop_pc)
1829 {
1830 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
0bfdf32f 1831 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
b8e1b30e 1832 (PTRACE_TYPE_ARG3) 0, &info);
fa593d66
PA
1833 }
1834
0bfdf32f 1835 regcache = get_thread_regcache (current_thread, 1);
fa593d66
PA
1836 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1837 lwp->stop_pc = status.tpoint_addr;
1838
1839 /* Cancel any fast tracepoint lock this thread was
1840 holding. */
1841 force_unlock_trace_buffer ();
1842 }
1843
1844 if (lwp->exit_jump_pad_bkpt != NULL)
1845 {
1846 if (debug_threads)
87ce2a04
DE
1847 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1848 "stopping all threads momentarily.\n");
fa593d66
PA
1849
1850 stop_all_lwps (1, lwp);
fa593d66
PA
1851
1852 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1853 lwp->exit_jump_pad_bkpt = NULL;
1854
1855 unstop_all_lwps (1, lwp);
1856
1857 gdb_assert (lwp->suspended >= 0);
1858 }
1859 }
1860 }
1861
1862 if (debug_threads)
87ce2a04
DE
1863 debug_printf ("Checking whether LWP %ld needs to move out of the "
1864 "jump pad...no\n",
0bfdf32f 1865 lwpid_of (current_thread));
0cccb683 1866
0bfdf32f 1867 current_thread = saved_thread;
fa593d66
PA
1868 return 0;
1869}
1870
1871/* Enqueue one signal in the "signals to report later when out of the
1872 jump pad" list. */
1873
1874static void
1875enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1876{
1877 struct pending_signals *p_sig;
d86d4aaf 1878 struct thread_info *thread = get_lwp_thread (lwp);
fa593d66
PA
1879
1880 if (debug_threads)
87ce2a04 1881 debug_printf ("Deferring signal %d for LWP %ld.\n",
d86d4aaf 1882 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1883
1884 if (debug_threads)
1885 {
1886 struct pending_signals *sig;
1887
1888 for (sig = lwp->pending_signals_to_report;
1889 sig != NULL;
1890 sig = sig->prev)
87ce2a04
DE
1891 debug_printf (" Already queued %d\n",
1892 sig->signal);
fa593d66 1893
87ce2a04 1894 debug_printf (" (no more currently queued signals)\n");
fa593d66
PA
1895 }
1896
1a981360
PA
1897 /* Don't enqueue non-RT signals if they are already in the deferred
1898 queue. (SIGSTOP being the easiest signal to see ending up here
1899 twice) */
1900 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1901 {
1902 struct pending_signals *sig;
1903
1904 for (sig = lwp->pending_signals_to_report;
1905 sig != NULL;
1906 sig = sig->prev)
1907 {
1908 if (sig->signal == WSTOPSIG (*wstat))
1909 {
1910 if (debug_threads)
87ce2a04
DE
1911 debug_printf ("Not requeuing already queued non-RT signal %d"
1912 " for LWP %ld\n",
1913 sig->signal,
d86d4aaf 1914 lwpid_of (thread));
1a981360
PA
1915 return;
1916 }
1917 }
1918 }
1919
fa593d66
PA
1920 p_sig = xmalloc (sizeof (*p_sig));
1921 p_sig->prev = lwp->pending_signals_to_report;
1922 p_sig->signal = WSTOPSIG (*wstat);
1923 memset (&p_sig->info, 0, sizeof (siginfo_t));
d86d4aaf 1924 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1925 &p_sig->info);
fa593d66
PA
1926
1927 lwp->pending_signals_to_report = p_sig;
1928}
1929
1930/* Dequeue one signal from the "signals to report later when out of
1931 the jump pad" list. */
1932
1933static int
1934dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1935{
d86d4aaf
DE
1936 struct thread_info *thread = get_lwp_thread (lwp);
1937
fa593d66
PA
1938 if (lwp->pending_signals_to_report != NULL)
1939 {
1940 struct pending_signals **p_sig;
1941
1942 p_sig = &lwp->pending_signals_to_report;
1943 while ((*p_sig)->prev != NULL)
1944 p_sig = &(*p_sig)->prev;
1945
1946 *wstat = W_STOPCODE ((*p_sig)->signal);
1947 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 1948 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 1949 &(*p_sig)->info);
fa593d66
PA
1950 free (*p_sig);
1951 *p_sig = NULL;
1952
1953 if (debug_threads)
87ce2a04 1954 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
d86d4aaf 1955 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
1956
1957 if (debug_threads)
1958 {
1959 struct pending_signals *sig;
1960
1961 for (sig = lwp->pending_signals_to_report;
1962 sig != NULL;
1963 sig = sig->prev)
87ce2a04
DE
1964 debug_printf (" Still queued %d\n",
1965 sig->signal);
fa593d66 1966
87ce2a04 1967 debug_printf (" (no more queued signals)\n");
fa593d66
PA
1968 }
1969
1970 return 1;
1971 }
1972
1973 return 0;
1974}
1975
582511be
PA
1976/* Fetch the possibly triggered data watchpoint info and store it in
1977 CHILD.
d50171e4 1978
582511be
PA
1979 On some archs, like x86, that use debug registers to set
1980 watchpoints, it's possible that the way to know which watched
1981 address trapped, is to check the register that is used to select
1982 which address to watch. Problem is, between setting the watchpoint
1983 and reading back which data address trapped, the user may change
1984 the set of watchpoints, and, as a consequence, GDB changes the
1985 debug registers in the inferior. To avoid reading back a stale
1986 stopped-data-address when that happens, we cache in LP the fact
1987 that a watchpoint trapped, and the corresponding data address, as
1988 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1989 registers meanwhile, we have the cached data we can rely on. */
d50171e4 1990
582511be
PA
1991static int
1992check_stopped_by_watchpoint (struct lwp_info *child)
1993{
1994 if (the_low_target.stopped_by_watchpoint != NULL)
d50171e4 1995 {
582511be 1996 struct thread_info *saved_thread;
d50171e4 1997
582511be
PA
1998 saved_thread = current_thread;
1999 current_thread = get_lwp_thread (child);
2000
2001 if (the_low_target.stopped_by_watchpoint ())
d50171e4 2002 {
15c66dd6 2003 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
582511be
PA
2004
2005 if (the_low_target.stopped_data_address != NULL)
2006 child->stopped_data_address
2007 = the_low_target.stopped_data_address ();
2008 else
2009 child->stopped_data_address = 0;
d50171e4
PA
2010 }
2011
0bfdf32f 2012 current_thread = saved_thread;
d50171e4
PA
2013 }
2014
15c66dd6 2015 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
c4d9ceb6
YQ
2016}
2017
de0d863e
DB
2018/* Return the ptrace options that we want to try to enable. */
2019
2020static int
2021linux_low_ptrace_options (int attached)
2022{
2023 int options = 0;
2024
2025 if (!attached)
2026 options |= PTRACE_O_EXITKILL;
2027
2028 if (report_fork_events)
2029 options |= PTRACE_O_TRACEFORK;
2030
c269dbdb
DB
2031 if (report_vfork_events)
2032 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2033
de0d863e
DB
2034 return options;
2035}
2036
fa96cb38
PA
2037/* Do low-level handling of the event, and check if we should go on
2038 and pass it to caller code. Return the affected lwp if we are, or
2039 NULL otherwise. */
2040
2041static struct lwp_info *
582511be 2042linux_low_filter_event (int lwpid, int wstat)
fa96cb38
PA
2043{
2044 struct lwp_info *child;
2045 struct thread_info *thread;
582511be 2046 int have_stop_pc = 0;
fa96cb38
PA
2047
2048 child = find_lwp_pid (pid_to_ptid (lwpid));
2049
2050 /* If we didn't find a process, one of two things presumably happened:
2051 - A process we started and then detached from has exited. Ignore it.
2052 - A process we are controlling has forked and the new child's stop
2053 was reported to us by the kernel. Save its PID. */
2054 if (child == NULL && WIFSTOPPED (wstat))
2055 {
2056 add_to_pid_list (&stopped_pids, lwpid, wstat);
2057 return NULL;
2058 }
2059 else if (child == NULL)
2060 return NULL;
2061
2062 thread = get_lwp_thread (child);
2063
2064 child->stopped = 1;
2065
2066 child->last_status = wstat;
2067
582511be
PA
2068 /* Check if the thread has exited. */
2069 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2070 {
2071 if (debug_threads)
2072 debug_printf ("LLFE: %d exited.\n", lwpid);
2073 if (num_lwps (pid_of (thread)) > 1)
2074 {
2075
2076 /* If there is at least one more LWP, then the exit signal was
2077 not the end of the debugged application and should be
2078 ignored. */
2079 delete_lwp (child);
2080 return NULL;
2081 }
2082 else
2083 {
2084 /* This was the last lwp in the process. Since events are
2085 serialized to GDB core, and we can't report this one
2086 right now, but GDB core and the other target layers will
2087 want to be notified about the exit code/signal, leave the
2088 status pending for the next time we're able to report
2089 it. */
2090 mark_lwp_dead (child, wstat);
2091 return child;
2092 }
2093 }
2094
2095 gdb_assert (WIFSTOPPED (wstat));
2096
fa96cb38
PA
2097 if (WIFSTOPPED (wstat))
2098 {
2099 struct process_info *proc;
2100
2101 /* Architecture-specific setup after inferior is running. This
2102 needs to happen after we have attached to the inferior and it
2103 is stopped for the first time, but before we access any
2104 inferior registers. */
2105 proc = find_process_pid (pid_of (thread));
fe978cb0 2106 if (proc->priv->new_inferior)
fa96cb38 2107 {
0bfdf32f 2108 struct thread_info *saved_thread;
fa96cb38 2109
0bfdf32f
GB
2110 saved_thread = current_thread;
2111 current_thread = thread;
fa96cb38
PA
2112
2113 the_low_target.arch_setup ();
2114
0bfdf32f 2115 current_thread = saved_thread;
fa96cb38 2116
fe978cb0 2117 proc->priv->new_inferior = 0;
fa96cb38
PA
2118 }
2119 }
2120
fa96cb38
PA
2121 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2122 {
beed38b8 2123 struct process_info *proc = find_process_pid (pid_of (thread));
de0d863e 2124 int options = linux_low_ptrace_options (proc->attached);
beed38b8 2125
de0d863e 2126 linux_enable_event_reporting (lwpid, options);
fa96cb38
PA
2127 child->must_set_ptrace_flags = 0;
2128 }
2129
582511be
PA
2130 /* Be careful to not overwrite stop_pc until
2131 check_stopped_by_breakpoint is called. */
fa96cb38 2132 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
89a5711c 2133 && linux_is_extended_waitstatus (wstat))
fa96cb38 2134 {
582511be 2135 child->stop_pc = get_pc (child);
de0d863e
DB
2136 if (handle_extended_wait (child, wstat))
2137 {
2138 /* The event has been handled, so just return without
2139 reporting it. */
2140 return NULL;
2141 }
fa96cb38
PA
2142 }
2143
3e572f71
PA
2144 /* Check first whether this was a SW/HW breakpoint before checking
2145 watchpoints, because at least s390 can't tell the data address of
2146 hardware watchpoint hits, and returns stopped-by-watchpoint as
2147 long as there's a watchpoint set. */
2148 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
582511be
PA
2149 {
2150 if (check_stopped_by_breakpoint (child))
2151 have_stop_pc = 1;
2152 }
2153
3e572f71
PA
2154 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2155 or hardware watchpoint. Check which is which if we got
2156 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2157 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2158 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2159 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2160 check_stopped_by_watchpoint (child);
2161
582511be
PA
2162 if (!have_stop_pc)
2163 child->stop_pc = get_pc (child);
2164
fa96cb38
PA
2165 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2166 && child->stop_expected)
2167 {
2168 if (debug_threads)
2169 debug_printf ("Expected stop.\n");
2170 child->stop_expected = 0;
2171
2172 if (thread->last_resume_kind == resume_stop)
2173 {
2174 /* We want to report the stop to the core. Treat the
2175 SIGSTOP as a normal event. */
2bf6fb9d
PA
2176 if (debug_threads)
2177 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2178 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2179 }
2180 else if (stopping_threads != NOT_STOPPING_THREADS)
2181 {
2182 /* Stopping threads. We don't want this SIGSTOP to end up
582511be 2183 pending. */
2bf6fb9d
PA
2184 if (debug_threads)
2185 debug_printf ("LLW: SIGSTOP caught for %s "
2186 "while stopping threads.\n",
2187 target_pid_to_str (ptid_of (thread)));
fa96cb38
PA
2188 return NULL;
2189 }
2190 else
2191 {
2bf6fb9d
PA
2192 /* This is a delayed SIGSTOP. Filter out the event. */
2193 if (debug_threads)
2194 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2195 child->stepping ? "step" : "continue",
2196 target_pid_to_str (ptid_of (thread)));
2197
fa96cb38
PA
2198 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2199 return NULL;
2200 }
2201 }
2202
582511be
PA
2203 child->status_pending_p = 1;
2204 child->status_pending = wstat;
fa96cb38
PA
2205 return child;
2206}
2207
20ba1ce6
PA
2208/* Resume LWPs that are currently stopped without any pending status
2209 to report, but are resumed from the core's perspective. */
2210
2211static void
2212resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2213{
2214 struct thread_info *thread = (struct thread_info *) entry;
2215 struct lwp_info *lp = get_thread_lwp (thread);
2216
2217 if (lp->stopped
2218 && !lp->status_pending_p
2219 && thread->last_resume_kind != resume_stop
2220 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2221 {
2222 int step = thread->last_resume_kind == resume_step;
2223
2224 if (debug_threads)
2225 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2226 target_pid_to_str (ptid_of (thread)),
2227 paddress (lp->stop_pc),
2228 step);
2229
2230 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2231 }
2232}
2233
fa96cb38
PA
2234/* Wait for an event from child(ren) WAIT_PTID, and return any that
2235 match FILTER_PTID (leaving others pending). The PTIDs can be:
2236 minus_one_ptid, to specify any child; a pid PTID, specifying all
2237 lwps of a thread group; or a PTID representing a single lwp. Store
2238 the stop status through the status pointer WSTAT. OPTIONS is
2239 passed to the waitpid call. Return 0 if no event was found and
2240 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2241 was found. Return the PID of the stopped child otherwise. */
bd99dc85 2242
0d62e5e8 2243static int
fa96cb38
PA
2244linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2245 int *wstatp, int options)
0d62e5e8 2246{
d86d4aaf 2247 struct thread_info *event_thread;
d50171e4 2248 struct lwp_info *event_child, *requested_child;
fa96cb38 2249 sigset_t block_mask, prev_mask;
d50171e4 2250
fa96cb38 2251 retry:
d86d4aaf
DE
2252 /* N.B. event_thread points to the thread_info struct that contains
2253 event_child. Keep them in sync. */
2254 event_thread = NULL;
d50171e4
PA
2255 event_child = NULL;
2256 requested_child = NULL;
0d62e5e8 2257
95954743 2258 /* Check for a lwp with a pending status. */
bd99dc85 2259
fa96cb38 2260 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
0d62e5e8 2261 {
d86d4aaf 2262 event_thread = (struct thread_info *)
fa96cb38 2263 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
d86d4aaf
DE
2264 if (event_thread != NULL)
2265 event_child = get_thread_lwp (event_thread);
2266 if (debug_threads && event_thread)
2267 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
0d62e5e8 2268 }
fa96cb38 2269 else if (!ptid_equal (filter_ptid, null_ptid))
0d62e5e8 2270 {
fa96cb38 2271 requested_child = find_lwp_pid (filter_ptid);
d50171e4 2272
bde24c0a 2273 if (stopping_threads == NOT_STOPPING_THREADS
fa593d66
PA
2274 && requested_child->status_pending_p
2275 && requested_child->collecting_fast_tracepoint)
2276 {
2277 enqueue_one_deferred_signal (requested_child,
2278 &requested_child->status_pending);
2279 requested_child->status_pending_p = 0;
2280 requested_child->status_pending = 0;
2281 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2282 }
2283
2284 if (requested_child->suspended
2285 && requested_child->status_pending_p)
38e08fca
GB
2286 {
2287 internal_error (__FILE__, __LINE__,
2288 "requesting an event out of a"
2289 " suspended child?");
2290 }
fa593d66 2291
d50171e4 2292 if (requested_child->status_pending_p)
d86d4aaf
DE
2293 {
2294 event_child = requested_child;
2295 event_thread = get_lwp_thread (event_child);
2296 }
0d62e5e8 2297 }
611cb4a5 2298
0d62e5e8
DJ
2299 if (event_child != NULL)
2300 {
bd99dc85 2301 if (debug_threads)
87ce2a04 2302 debug_printf ("Got an event from pending child %ld (%04x)\n",
d86d4aaf 2303 lwpid_of (event_thread), event_child->status_pending);
fa96cb38 2304 *wstatp = event_child->status_pending;
bd99dc85
PA
2305 event_child->status_pending_p = 0;
2306 event_child->status_pending = 0;
0bfdf32f 2307 current_thread = event_thread;
d86d4aaf 2308 return lwpid_of (event_thread);
0d62e5e8
DJ
2309 }
2310
fa96cb38
PA
2311 /* But if we don't find a pending event, we'll have to wait.
2312
2313 We only enter this loop if no process has a pending wait status.
2314 Thus any action taken in response to a wait status inside this
2315 loop is responding as soon as we detect the status, not after any
2316 pending events. */
d8301ad1 2317
fa96cb38
PA
2318 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2319 all signals while here. */
2320 sigfillset (&block_mask);
2321 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2322
582511be
PA
2323 /* Always pull all events out of the kernel. We'll randomly select
2324 an event LWP out of all that have events, to prevent
2325 starvation. */
fa96cb38 2326 while (event_child == NULL)
0d62e5e8 2327 {
fa96cb38 2328 pid_t ret = 0;
0d62e5e8 2329
fa96cb38
PA
2330 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2331 quirks:
0d62e5e8 2332
fa96cb38
PA
2333 - If the thread group leader exits while other threads in the
2334 thread group still exist, waitpid(TGID, ...) hangs. That
2335 waitpid won't return an exit status until the other threads
2336 in the group are reaped.
611cb4a5 2337
fa96cb38
PA
2338 - When a non-leader thread execs, that thread just vanishes
2339 without reporting an exit (so we'd hang if we waited for it
2340 explicitly in that case). The exec event is reported to
2341 the TGID pid (although we don't currently enable exec
2342 events). */
2343 errno = 0;
2344 ret = my_waitpid (-1, wstatp, options | WNOHANG);
d8301ad1 2345
fa96cb38
PA
2346 if (debug_threads)
2347 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2348 ret, errno ? strerror (errno) : "ERRNO-OK");
0d62e5e8 2349
fa96cb38 2350 if (ret > 0)
0d62e5e8 2351 {
89be2091 2352 if (debug_threads)
bd99dc85 2353 {
fa96cb38
PA
2354 debug_printf ("LLW: waitpid %ld received %s\n",
2355 (long) ret, status_to_str (*wstatp));
bd99dc85 2356 }
89be2091 2357
582511be
PA
2358 /* Filter all events. IOW, leave all events pending. We'll
2359 randomly select an event LWP out of all that have events
2360 below. */
2361 linux_low_filter_event (ret, *wstatp);
fa96cb38
PA
2362 /* Retry until nothing comes out of waitpid. A single
2363 SIGCHLD can indicate more than one child stopped. */
89be2091
DJ
2364 continue;
2365 }
2366
20ba1ce6
PA
2367 /* Now that we've pulled all events out of the kernel, resume
2368 LWPs that don't have an interesting event to report. */
2369 if (stopping_threads == NOT_STOPPING_THREADS)
2370 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2371
2372 /* ... and find an LWP with a status to report to the core, if
2373 any. */
582511be
PA
2374 event_thread = (struct thread_info *)
2375 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2376 if (event_thread != NULL)
2377 {
2378 event_child = get_thread_lwp (event_thread);
2379 *wstatp = event_child->status_pending;
2380 event_child->status_pending_p = 0;
2381 event_child->status_pending = 0;
2382 break;
2383 }
2384
fa96cb38
PA
2385 /* Check for zombie thread group leaders. Those can't be reaped
2386 until all other threads in the thread group are. */
2387 check_zombie_leaders ();
2388
2389 /* If there are no resumed children left in the set of LWPs we
2390 want to wait for, bail. We can't just block in
2391 waitpid/sigsuspend, because lwps might have been left stopped
2392 in trace-stop state, and we'd be stuck forever waiting for
2393 their status to change (which would only happen if we resumed
2394 them). Even if WNOHANG is set, this return code is preferred
2395 over 0 (below), as it is more detailed. */
2396 if ((find_inferior (&all_threads,
2397 not_stopped_callback,
2398 &wait_ptid) == NULL))
a6dbe5df 2399 {
fa96cb38
PA
2400 if (debug_threads)
2401 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2402 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2403 return -1;
a6dbe5df
PA
2404 }
2405
fa96cb38
PA
2406 /* No interesting event to report to the caller. */
2407 if ((options & WNOHANG))
24a09b5f 2408 {
fa96cb38
PA
2409 if (debug_threads)
2410 debug_printf ("WNOHANG set, no event found\n");
2411
2412 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2413 return 0;
24a09b5f
DJ
2414 }
2415
fa96cb38
PA
2416 /* Block until we get an event reported with SIGCHLD. */
2417 if (debug_threads)
2418 debug_printf ("sigsuspend'ing\n");
d50171e4 2419
fa96cb38
PA
2420 sigsuspend (&prev_mask);
2421 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2422 goto retry;
2423 }
d50171e4 2424
fa96cb38 2425 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
d50171e4 2426
0bfdf32f 2427 current_thread = event_thread;
d50171e4 2428
fa96cb38
PA
2429 /* Check for thread exit. */
2430 if (! WIFSTOPPED (*wstatp))
2431 {
2432 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2433
2434 if (debug_threads)
2435 debug_printf ("LWP %d is the last lwp of process. "
2436 "Process %ld exiting.\n",
2437 pid_of (event_thread), lwpid_of (event_thread));
d86d4aaf 2438 return lwpid_of (event_thread);
611cb4a5 2439 }
0d62e5e8 2440
fa96cb38
PA
2441 return lwpid_of (event_thread);
2442}
2443
2444/* Wait for an event from child(ren) PTID. PTIDs can be:
2445 minus_one_ptid, to specify any child; a pid PTID, specifying all
2446 lwps of a thread group; or a PTID representing a single lwp. Store
2447 the stop status through the status pointer WSTAT. OPTIONS is
2448 passed to the waitpid call. Return 0 if no event was found and
2449 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2450 was found. Return the PID of the stopped child otherwise. */
2451
2452static int
2453linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2454{
2455 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
611cb4a5
DJ
2456}
2457
6bf5e0ba
PA
2458/* Count the LWP's that have had events. */
2459
2460static int
2461count_events_callback (struct inferior_list_entry *entry, void *data)
2462{
d86d4aaf 2463 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2464 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2465 int *count = data;
2466
2467 gdb_assert (count != NULL);
2468
582511be 2469 /* Count only resumed LWPs that have an event pending. */
8336d594 2470 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2471 && lp->status_pending_p)
6bf5e0ba
PA
2472 (*count)++;
2473
2474 return 0;
2475}
2476
2477/* Select the LWP (if any) that is currently being single-stepped. */
2478
2479static int
2480select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2481{
d86d4aaf
DE
2482 struct thread_info *thread = (struct thread_info *) entry;
2483 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba 2484
8336d594
PA
2485 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2486 && thread->last_resume_kind == resume_step
6bf5e0ba
PA
2487 && lp->status_pending_p)
2488 return 1;
2489 else
2490 return 0;
2491}
2492
b90fc188 2493/* Select the Nth LWP that has had an event. */
6bf5e0ba
PA
2494
2495static int
2496select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2497{
d86d4aaf 2498 struct thread_info *thread = (struct thread_info *) entry;
8bf3b159 2499 struct lwp_info *lp = get_thread_lwp (thread);
6bf5e0ba
PA
2500 int *selector = data;
2501
2502 gdb_assert (selector != NULL);
2503
582511be 2504 /* Select only resumed LWPs that have an event pending. */
91baf43f 2505 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
8bf3b159 2506 && lp->status_pending_p)
6bf5e0ba
PA
2507 if ((*selector)-- == 0)
2508 return 1;
2509
2510 return 0;
2511}
2512
6bf5e0ba
PA
2513/* Select one LWP out of those that have events pending. */
2514
2515static void
2516select_event_lwp (struct lwp_info **orig_lp)
2517{
2518 int num_events = 0;
2519 int random_selector;
582511be
PA
2520 struct thread_info *event_thread = NULL;
2521
2522 /* In all-stop, give preference to the LWP that is being
2523 single-stepped. There will be at most one, and it's the LWP that
2524 the core is most interested in. If we didn't do this, then we'd
2525 have to handle pending step SIGTRAPs somehow in case the core
2526 later continues the previously-stepped thread, otherwise we'd
2527 report the pending SIGTRAP, and the core, not having stepped the
2528 thread, wouldn't understand what the trap was for, and therefore
2529 would report it to the user as a random signal. */
2530 if (!non_stop)
6bf5e0ba 2531 {
582511be
PA
2532 event_thread
2533 = (struct thread_info *) find_inferior (&all_threads,
2534 select_singlestep_lwp_callback,
2535 NULL);
2536 if (event_thread != NULL)
2537 {
2538 if (debug_threads)
2539 debug_printf ("SEL: Select single-step %s\n",
2540 target_pid_to_str (ptid_of (event_thread)));
2541 }
6bf5e0ba 2542 }
582511be 2543 if (event_thread == NULL)
6bf5e0ba
PA
2544 {
2545 /* No single-stepping LWP. Select one at random, out of those
b90fc188 2546 which have had events. */
6bf5e0ba 2547
b90fc188 2548 /* First see how many events we have. */
d86d4aaf 2549 find_inferior (&all_threads, count_events_callback, &num_events);
8bf3b159 2550 gdb_assert (num_events > 0);
6bf5e0ba 2551
b90fc188
PA
2552 /* Now randomly pick a LWP out of those that have had
2553 events. */
6bf5e0ba
PA
2554 random_selector = (int)
2555 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2556
2557 if (debug_threads && num_events > 1)
87ce2a04
DE
2558 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2559 num_events, random_selector);
6bf5e0ba 2560
d86d4aaf
DE
2561 event_thread
2562 = (struct thread_info *) find_inferior (&all_threads,
2563 select_event_lwp_callback,
2564 &random_selector);
6bf5e0ba
PA
2565 }
2566
d86d4aaf 2567 if (event_thread != NULL)
6bf5e0ba 2568 {
d86d4aaf
DE
2569 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2570
6bf5e0ba
PA
2571 /* Switch the event LWP. */
2572 *orig_lp = event_lp;
2573 }
2574}
2575
7984d532
PA
2576/* Decrement the suspend count of an LWP. */
2577
2578static int
2579unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2580{
d86d4aaf
DE
2581 struct thread_info *thread = (struct thread_info *) entry;
2582 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
2583
2584 /* Ignore EXCEPT. */
2585 if (lwp == except)
2586 return 0;
2587
2588 lwp->suspended--;
2589
2590 gdb_assert (lwp->suspended >= 0);
2591 return 0;
2592}
2593
2594/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2595 NULL. */
2596
2597static void
2598unsuspend_all_lwps (struct lwp_info *except)
2599{
d86d4aaf 2600 find_inferior (&all_threads, unsuspend_one_lwp, except);
7984d532
PA
2601}
2602
fa593d66
PA
2603static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2604static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2605 void *data);
2606static int lwp_running (struct inferior_list_entry *entry, void *data);
2607static ptid_t linux_wait_1 (ptid_t ptid,
2608 struct target_waitstatus *ourstatus,
2609 int target_options);
2610
2611/* Stabilize threads (move out of jump pads).
2612
2613 If a thread is midway collecting a fast tracepoint, we need to
2614 finish the collection and move it out of the jump pad before
2615 reporting the signal.
2616
2617 This avoids recursion while collecting (when a signal arrives
2618 midway, and the signal handler itself collects), which would trash
2619 the trace buffer. In case the user set a breakpoint in a signal
2620 handler, this avoids the backtrace showing the jump pad, etc..
2621 Most importantly, there are certain things we can't do safely if
2622 threads are stopped in a jump pad (or in its callee's). For
2623 example:
2624
2625 - starting a new trace run. A thread still collecting the
2626 previous run, could trash the trace buffer when resumed. The trace
2627 buffer control structures would have been reset but the thread had
2628 no way to tell. The thread could even midway memcpy'ing to the
2629 buffer, which would mean that when resumed, it would clobber the
2630 trace buffer that had been set for a new run.
2631
2632 - we can't rewrite/reuse the jump pads for new tracepoints
2633 safely. Say you do tstart while a thread is stopped midway while
2634 collecting. When the thread is later resumed, it finishes the
2635 collection, and returns to the jump pad, to execute the original
2636 instruction that was under the tracepoint jump at the time the
2637 older run had been started. If the jump pad had been rewritten
2638 since for something else in the new run, the thread would now
2639 execute the wrong / random instructions. */
2640
2641static void
2642linux_stabilize_threads (void)
2643{
0bfdf32f 2644 struct thread_info *saved_thread;
d86d4aaf 2645 struct thread_info *thread_stuck;
fa593d66 2646
d86d4aaf
DE
2647 thread_stuck
2648 = (struct thread_info *) find_inferior (&all_threads,
2649 stuck_in_jump_pad_callback,
2650 NULL);
2651 if (thread_stuck != NULL)
fa593d66 2652 {
b4d51a55 2653 if (debug_threads)
87ce2a04 2654 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
d86d4aaf 2655 lwpid_of (thread_stuck));
fa593d66
PA
2656 return;
2657 }
2658
0bfdf32f 2659 saved_thread = current_thread;
fa593d66
PA
2660
2661 stabilizing_threads = 1;
2662
2663 /* Kick 'em all. */
d86d4aaf 2664 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
fa593d66
PA
2665
2666 /* Loop until all are stopped out of the jump pads. */
d86d4aaf 2667 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
fa593d66
PA
2668 {
2669 struct target_waitstatus ourstatus;
2670 struct lwp_info *lwp;
fa593d66
PA
2671 int wstat;
2672
2673 /* Note that we go through the full wait even loop. While
2674 moving threads out of jump pad, we need to be able to step
2675 over internal breakpoints and such. */
32fcada3 2676 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
fa593d66
PA
2677
2678 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2679 {
0bfdf32f 2680 lwp = get_thread_lwp (current_thread);
fa593d66
PA
2681
2682 /* Lock it. */
2683 lwp->suspended++;
2684
a493e3e2 2685 if (ourstatus.value.sig != GDB_SIGNAL_0
0bfdf32f 2686 || current_thread->last_resume_kind == resume_stop)
fa593d66 2687 {
2ea28649 2688 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
fa593d66
PA
2689 enqueue_one_deferred_signal (lwp, &wstat);
2690 }
2691 }
2692 }
2693
d86d4aaf 2694 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
fa593d66
PA
2695
2696 stabilizing_threads = 0;
2697
0bfdf32f 2698 current_thread = saved_thread;
fa593d66 2699
b4d51a55 2700 if (debug_threads)
fa593d66 2701 {
d86d4aaf
DE
2702 thread_stuck
2703 = (struct thread_info *) find_inferior (&all_threads,
2704 stuck_in_jump_pad_callback,
2705 NULL);
2706 if (thread_stuck != NULL)
87ce2a04 2707 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
d86d4aaf 2708 lwpid_of (thread_stuck));
fa593d66
PA
2709 }
2710}
2711
582511be
PA
2712static void async_file_mark (void);
2713
2714/* Convenience function that is called when the kernel reports an
2715 event that is not passed out to GDB. */
2716
2717static ptid_t
2718ignore_event (struct target_waitstatus *ourstatus)
2719{
2720 /* If we got an event, there may still be others, as a single
2721 SIGCHLD can indicate more than one child stopped. This forces
2722 another target_wait call. */
2723 async_file_mark ();
2724
2725 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2726 return null_ptid;
2727}
2728
de0d863e
DB
2729/* Return non-zero if WAITSTATUS reflects an extended linux
2730 event. Otherwise, return zero. */
2731
2732static int
2733extended_event_reported (const struct target_waitstatus *waitstatus)
2734{
2735 if (waitstatus == NULL)
2736 return 0;
2737
c269dbdb
DB
2738 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2739 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2740 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
de0d863e
DB
2741}
2742
0d62e5e8 2743/* Wait for process, returns status. */
da6d8c04 2744
95954743
PA
2745static ptid_t
2746linux_wait_1 (ptid_t ptid,
2747 struct target_waitstatus *ourstatus, int target_options)
da6d8c04 2748{
e5f1222d 2749 int w;
fc7238bb 2750 struct lwp_info *event_child;
bd99dc85 2751 int options;
bd99dc85 2752 int pid;
6bf5e0ba
PA
2753 int step_over_finished;
2754 int bp_explains_trap;
2755 int maybe_internal_trap;
2756 int report_to_gdb;
219f2f23 2757 int trace_event;
c2d6af84 2758 int in_step_range;
bd99dc85 2759
87ce2a04
DE
2760 if (debug_threads)
2761 {
2762 debug_enter ();
2763 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2764 }
2765
bd99dc85
PA
2766 /* Translate generic target options into linux options. */
2767 options = __WALL;
2768 if (target_options & TARGET_WNOHANG)
2769 options |= WNOHANG;
0d62e5e8 2770
fa593d66
PA
2771 bp_explains_trap = 0;
2772 trace_event = 0;
c2d6af84 2773 in_step_range = 0;
bd99dc85
PA
2774 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2775
6bf5e0ba
PA
2776 if (ptid_equal (step_over_bkpt, null_ptid))
2777 pid = linux_wait_for_event (ptid, &w, options);
2778 else
2779 {
2780 if (debug_threads)
87ce2a04
DE
2781 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2782 target_pid_to_str (step_over_bkpt));
6bf5e0ba
PA
2783 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2784 }
2785
fa96cb38 2786 if (pid == 0)
87ce2a04 2787 {
fa96cb38
PA
2788 gdb_assert (target_options & TARGET_WNOHANG);
2789
87ce2a04
DE
2790 if (debug_threads)
2791 {
fa96cb38
PA
2792 debug_printf ("linux_wait_1 ret = null_ptid, "
2793 "TARGET_WAITKIND_IGNORE\n");
87ce2a04
DE
2794 debug_exit ();
2795 }
fa96cb38
PA
2796
2797 ourstatus->kind = TARGET_WAITKIND_IGNORE;
87ce2a04
DE
2798 return null_ptid;
2799 }
fa96cb38
PA
2800 else if (pid == -1)
2801 {
2802 if (debug_threads)
2803 {
2804 debug_printf ("linux_wait_1 ret = null_ptid, "
2805 "TARGET_WAITKIND_NO_RESUMED\n");
2806 debug_exit ();
2807 }
bd99dc85 2808
fa96cb38
PA
2809 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2810 return null_ptid;
2811 }
0d62e5e8 2812
0bfdf32f 2813 event_child = get_thread_lwp (current_thread);
0d62e5e8 2814
fa96cb38
PA
2815 /* linux_wait_for_event only returns an exit status for the last
2816 child of a process. Report it. */
2817 if (WIFEXITED (w) || WIFSIGNALED (w))
da6d8c04 2818 {
fa96cb38 2819 if (WIFEXITED (w))
0d62e5e8 2820 {
fa96cb38
PA
2821 ourstatus->kind = TARGET_WAITKIND_EXITED;
2822 ourstatus->value.integer = WEXITSTATUS (w);
bd99dc85 2823
fa96cb38 2824 if (debug_threads)
bd99dc85 2825 {
fa96cb38
PA
2826 debug_printf ("linux_wait_1 ret = %s, exited with "
2827 "retcode %d\n",
0bfdf32f 2828 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2829 WEXITSTATUS (w));
2830 debug_exit ();
bd99dc85 2831 }
fa96cb38
PA
2832 }
2833 else
2834 {
2835 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2836 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
5b1c542e 2837
fa96cb38
PA
2838 if (debug_threads)
2839 {
2840 debug_printf ("linux_wait_1 ret = %s, terminated with "
2841 "signal %d\n",
0bfdf32f 2842 target_pid_to_str (ptid_of (current_thread)),
fa96cb38
PA
2843 WTERMSIG (w));
2844 debug_exit ();
2845 }
0d62e5e8 2846 }
fa96cb38 2847
0bfdf32f 2848 return ptid_of (current_thread);
da6d8c04
DJ
2849 }
2850
8090aef2
PA
2851 /* If step-over executes a breakpoint instruction, it means a
2852 gdb/gdbserver breakpoint had been planted on top of a permanent
2853 breakpoint. The PC has been adjusted by
2854 check_stopped_by_breakpoint to point at the breakpoint address.
2855 Advance the PC manually past the breakpoint, otherwise the
2856 program would keep trapping the permanent breakpoint forever. */
2857 if (!ptid_equal (step_over_bkpt, null_ptid)
15c66dd6 2858 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
8090aef2 2859 {
9beb7c4e 2860 unsigned int increment_pc = the_low_target.breakpoint_len;
8090aef2
PA
2861
2862 if (debug_threads)
2863 {
2864 debug_printf ("step-over for %s executed software breakpoint\n",
2865 target_pid_to_str (ptid_of (current_thread)));
2866 }
2867
2868 if (increment_pc != 0)
2869 {
2870 struct regcache *regcache
2871 = get_thread_regcache (current_thread, 1);
2872
2873 event_child->stop_pc += increment_pc;
2874 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2875
2876 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
15c66dd6 2877 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8090aef2
PA
2878 }
2879 }
2880
6bf5e0ba
PA
2881 /* If this event was not handled before, and is not a SIGTRAP, we
2882 report it. SIGILL and SIGSEGV are also treated as traps in case
2883 a breakpoint is inserted at the current PC. If this target does
2884 not support internal breakpoints at all, we also report the
2885 SIGTRAP without further processing; it's of no concern to us. */
2886 maybe_internal_trap
2887 = (supports_breakpoints ()
2888 && (WSTOPSIG (w) == SIGTRAP
2889 || ((WSTOPSIG (w) == SIGILL
2890 || WSTOPSIG (w) == SIGSEGV)
2891 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2892
2893 if (maybe_internal_trap)
2894 {
2895 /* Handle anything that requires bookkeeping before deciding to
2896 report the event or continue waiting. */
2897
2898 /* First check if we can explain the SIGTRAP with an internal
2899 breakpoint, or if we should possibly report the event to GDB.
2900 Do this before anything that may remove or insert a
2901 breakpoint. */
2902 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2903
2904 /* We have a SIGTRAP, possibly a step-over dance has just
2905 finished. If so, tweak the state machine accordingly,
2906 reinsert breakpoints and delete any reinsert (software
2907 single-step) breakpoints. */
2908 step_over_finished = finish_step_over (event_child);
2909
2910 /* Now invoke the callbacks of any internal breakpoints there. */
2911 check_breakpoints (event_child->stop_pc);
2912
219f2f23
PA
2913 /* Handle tracepoint data collecting. This may overflow the
2914 trace buffer, and cause a tracing stop, removing
2915 breakpoints. */
2916 trace_event = handle_tracepoints (event_child);
2917
6bf5e0ba
PA
2918 if (bp_explains_trap)
2919 {
2920 /* If we stepped or ran into an internal breakpoint, we've
2921 already handled it. So next time we resume (from this
2922 PC), we should step over it. */
2923 if (debug_threads)
87ce2a04 2924 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 2925
8b07ae33
PA
2926 if (breakpoint_here (event_child->stop_pc))
2927 event_child->need_step_over = 1;
6bf5e0ba
PA
2928 }
2929 }
2930 else
2931 {
2932 /* We have some other signal, possibly a step-over dance was in
2933 progress, and it should be cancelled too. */
2934 step_over_finished = finish_step_over (event_child);
fa593d66
PA
2935 }
2936
2937 /* We have all the data we need. Either report the event to GDB, or
2938 resume threads and keep waiting for more. */
2939
2940 /* If we're collecting a fast tracepoint, finish the collection and
2941 move out of the jump pad before delivering a signal. See
2942 linux_stabilize_threads. */
2943
2944 if (WIFSTOPPED (w)
2945 && WSTOPSIG (w) != SIGTRAP
2946 && supports_fast_tracepoints ()
58b4daa5 2947 && agent_loaded_p ())
fa593d66
PA
2948 {
2949 if (debug_threads)
87ce2a04
DE
2950 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2951 "to defer or adjust it.\n",
0bfdf32f 2952 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2953
2954 /* Allow debugging the jump pad itself. */
0bfdf32f 2955 if (current_thread->last_resume_kind != resume_step
fa593d66
PA
2956 && maybe_move_out_of_jump_pad (event_child, &w))
2957 {
2958 enqueue_one_deferred_signal (event_child, &w);
2959
2960 if (debug_threads)
87ce2a04 2961 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
0bfdf32f 2962 WSTOPSIG (w), lwpid_of (current_thread));
fa593d66
PA
2963
2964 linux_resume_one_lwp (event_child, 0, 0, NULL);
582511be
PA
2965
2966 return ignore_event (ourstatus);
fa593d66
PA
2967 }
2968 }
219f2f23 2969
fa593d66
PA
2970 if (event_child->collecting_fast_tracepoint)
2971 {
2972 if (debug_threads)
87ce2a04
DE
2973 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2974 "Check if we're already there.\n",
0bfdf32f 2975 lwpid_of (current_thread),
87ce2a04 2976 event_child->collecting_fast_tracepoint);
fa593d66
PA
2977
2978 trace_event = 1;
2979
2980 event_child->collecting_fast_tracepoint
2981 = linux_fast_tracepoint_collecting (event_child, NULL);
2982
2983 if (event_child->collecting_fast_tracepoint != 1)
2984 {
2985 /* No longer need this breakpoint. */
2986 if (event_child->exit_jump_pad_bkpt != NULL)
2987 {
2988 if (debug_threads)
87ce2a04
DE
2989 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2990 "stopping all threads momentarily.\n");
fa593d66
PA
2991
2992 /* Other running threads could hit this breakpoint.
2993 We don't handle moribund locations like GDB does,
2994 instead we always pause all threads when removing
2995 breakpoints, so that any step-over or
2996 decr_pc_after_break adjustment is always taken
2997 care of while the breakpoint is still
2998 inserted. */
2999 stop_all_lwps (1, event_child);
fa593d66
PA
3000
3001 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3002 event_child->exit_jump_pad_bkpt = NULL;
3003
3004 unstop_all_lwps (1, event_child);
3005
3006 gdb_assert (event_child->suspended >= 0);
3007 }
3008 }
3009
3010 if (event_child->collecting_fast_tracepoint == 0)
3011 {
3012 if (debug_threads)
87ce2a04
DE
3013 debug_printf ("fast tracepoint finished "
3014 "collecting successfully.\n");
fa593d66
PA
3015
3016 /* We may have a deferred signal to report. */
3017 if (dequeue_one_deferred_signal (event_child, &w))
3018 {
3019 if (debug_threads)
87ce2a04 3020 debug_printf ("dequeued one signal.\n");
fa593d66 3021 }
3c11dd79 3022 else
fa593d66 3023 {
3c11dd79 3024 if (debug_threads)
87ce2a04 3025 debug_printf ("no deferred signals.\n");
fa593d66
PA
3026
3027 if (stabilizing_threads)
3028 {
3029 ourstatus->kind = TARGET_WAITKIND_STOPPED;
a493e3e2 3030 ourstatus->value.sig = GDB_SIGNAL_0;
87ce2a04
DE
3031
3032 if (debug_threads)
3033 {
3034 debug_printf ("linux_wait_1 ret = %s, stopped "
3035 "while stabilizing threads\n",
0bfdf32f 3036 target_pid_to_str (ptid_of (current_thread)));
87ce2a04
DE
3037 debug_exit ();
3038 }
3039
0bfdf32f 3040 return ptid_of (current_thread);
fa593d66
PA
3041 }
3042 }
3043 }
6bf5e0ba
PA
3044 }
3045
e471f25b
PA
3046 /* Check whether GDB would be interested in this event. */
3047
3048 /* If GDB is not interested in this signal, don't stop other
3049 threads, and don't report it to GDB. Just resume the inferior
3050 right away. We do this for threading-related signals as well as
3051 any that GDB specifically requested we ignore. But never ignore
3052 SIGSTOP if we sent it ourselves, and do not ignore signals when
3053 stepping - they may require special handling to skip the signal
c9587f88
AT
3054 handler. Also never ignore signals that could be caused by a
3055 breakpoint. */
e471f25b
PA
3056 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3057 thread library? */
3058 if (WIFSTOPPED (w)
0bfdf32f 3059 && current_thread->last_resume_kind != resume_step
e471f25b 3060 && (
1a981360 3061#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
fe978cb0 3062 (current_process ()->priv->thread_db != NULL
e471f25b
PA
3063 && (WSTOPSIG (w) == __SIGRTMIN
3064 || WSTOPSIG (w) == __SIGRTMIN + 1))
3065 ||
3066#endif
2ea28649 3067 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
e471f25b 3068 && !(WSTOPSIG (w) == SIGSTOP
c9587f88
AT
3069 && current_thread->last_resume_kind == resume_stop)
3070 && !linux_wstatus_maybe_breakpoint (w))))
e471f25b
PA
3071 {
3072 siginfo_t info, *info_p;
3073
3074 if (debug_threads)
87ce2a04 3075 debug_printf ("Ignored signal %d for LWP %ld.\n",
0bfdf32f 3076 WSTOPSIG (w), lwpid_of (current_thread));
e471f25b 3077
0bfdf32f 3078 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
b8e1b30e 3079 (PTRACE_TYPE_ARG3) 0, &info) == 0)
e471f25b
PA
3080 info_p = &info;
3081 else
3082 info_p = NULL;
3083 linux_resume_one_lwp (event_child, event_child->stepping,
3084 WSTOPSIG (w), info_p);
582511be 3085 return ignore_event (ourstatus);
e471f25b
PA
3086 }
3087
c2d6af84
PA
3088 /* Note that all addresses are always "out of the step range" when
3089 there's no range to begin with. */
3090 in_step_range = lwp_in_step_range (event_child);
3091
3092 /* If GDB wanted this thread to single step, and the thread is out
3093 of the step range, we always want to report the SIGTRAP, and let
3094 GDB handle it. Watchpoints should always be reported. So should
3095 signals we can't explain. A SIGTRAP we can't explain could be a
3096 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3097 do, we're be able to handle GDB breakpoints on top of internal
3098 breakpoints, by handling the internal breakpoint and still
3099 reporting the event to GDB. If we don't, we're out of luck, GDB
3100 won't see the breakpoint hit. */
6bf5e0ba 3101 report_to_gdb = (!maybe_internal_trap
0bfdf32f 3102 || (current_thread->last_resume_kind == resume_step
c2d6af84 3103 && !in_step_range)
15c66dd6 3104 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
c2d6af84 3105 || (!step_over_finished && !in_step_range
493e2a69 3106 && !bp_explains_trap && !trace_event)
9f3a5c85 3107 || (gdb_breakpoint_here (event_child->stop_pc)
d3ce09f5 3108 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
de0d863e
DB
3109 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3110 || extended_event_reported (&event_child->waitstatus));
d3ce09f5
SS
3111
3112 run_breakpoint_commands (event_child->stop_pc);
6bf5e0ba
PA
3113
3114 /* We found no reason GDB would want us to stop. We either hit one
3115 of our own breakpoints, or finished an internal step GDB
3116 shouldn't know about. */
3117 if (!report_to_gdb)
3118 {
3119 if (debug_threads)
3120 {
3121 if (bp_explains_trap)
87ce2a04 3122 debug_printf ("Hit a gdbserver breakpoint.\n");
6bf5e0ba 3123 if (step_over_finished)
87ce2a04 3124 debug_printf ("Step-over finished.\n");
219f2f23 3125 if (trace_event)
87ce2a04 3126 debug_printf ("Tracepoint event.\n");
c2d6af84 3127 if (lwp_in_step_range (event_child))
87ce2a04
DE
3128 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3129 paddress (event_child->stop_pc),
3130 paddress (event_child->step_range_start),
3131 paddress (event_child->step_range_end));
de0d863e
DB
3132 if (extended_event_reported (&event_child->waitstatus))
3133 {
3134 char *str = target_waitstatus_to_string (ourstatus);
3135 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3136 lwpid_of (get_lwp_thread (event_child)), str);
3137 xfree (str);
3138 }
6bf5e0ba
PA
3139 }
3140
3141 /* We're not reporting this breakpoint to GDB, so apply the
3142 decr_pc_after_break adjustment to the inferior's regcache
3143 ourselves. */
3144
3145 if (the_low_target.set_pc != NULL)
3146 {
3147 struct regcache *regcache
0bfdf32f 3148 = get_thread_regcache (current_thread, 1);
6bf5e0ba
PA
3149 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3150 }
3151
7984d532
PA
3152 /* We may have finished stepping over a breakpoint. If so,
3153 we've stopped and suspended all LWPs momentarily except the
3154 stepping one. This is where we resume them all again. We're
3155 going to keep waiting, so use proceed, which handles stepping
3156 over the next breakpoint. */
6bf5e0ba 3157 if (debug_threads)
87ce2a04 3158 debug_printf ("proceeding all threads.\n");
7984d532
PA
3159
3160 if (step_over_finished)
3161 unsuspend_all_lwps (event_child);
3162
6bf5e0ba 3163 proceed_all_lwps ();
582511be 3164 return ignore_event (ourstatus);
6bf5e0ba
PA
3165 }
3166
3167 if (debug_threads)
3168 {
0bfdf32f 3169 if (current_thread->last_resume_kind == resume_step)
c2d6af84
PA
3170 {
3171 if (event_child->step_range_start == event_child->step_range_end)
87ce2a04 3172 debug_printf ("GDB wanted to single-step, reporting event.\n");
c2d6af84 3173 else if (!lwp_in_step_range (event_child))
87ce2a04 3174 debug_printf ("Out of step range, reporting event.\n");
c2d6af84 3175 }
15c66dd6 3176 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
87ce2a04 3177 debug_printf ("Stopped by watchpoint.\n");
582511be 3178 else if (gdb_breakpoint_here (event_child->stop_pc))
87ce2a04 3179 debug_printf ("Stopped by GDB breakpoint.\n");
6bf5e0ba 3180 if (debug_threads)
87ce2a04 3181 debug_printf ("Hit a non-gdbserver trap event.\n");
6bf5e0ba
PA
3182 }
3183
3184 /* Alright, we're going to report a stop. */
3185
582511be 3186 if (!stabilizing_threads)
6bf5e0ba
PA
3187 {
3188 /* In all-stop, stop all threads. */
582511be
PA
3189 if (!non_stop)
3190 stop_all_lwps (0, NULL);
6bf5e0ba
PA
3191
3192 /* If we're not waiting for a specific LWP, choose an event LWP
3193 from among those that have had events. Giving equal priority
3194 to all LWPs that have had events helps prevent
3195 starvation. */
3196 if (ptid_equal (ptid, minus_one_ptid))
3197 {
3198 event_child->status_pending_p = 1;
3199 event_child->status_pending = w;
3200
3201 select_event_lwp (&event_child);
3202
0bfdf32f
GB
3203 /* current_thread and event_child must stay in sync. */
3204 current_thread = get_lwp_thread (event_child);
ee1e2d4f 3205
6bf5e0ba
PA
3206 event_child->status_pending_p = 0;
3207 w = event_child->status_pending;
3208 }
3209
c03e6ccc 3210 if (step_over_finished)
582511be
PA
3211 {
3212 if (!non_stop)
3213 {
3214 /* If we were doing a step-over, all other threads but
3215 the stepping one had been paused in start_step_over,
3216 with their suspend counts incremented. We don't want
3217 to do a full unstop/unpause, because we're in
3218 all-stop mode (so we want threads stopped), but we
3219 still need to unsuspend the other threads, to
3220 decrement their `suspended' count back. */
3221 unsuspend_all_lwps (event_child);
3222 }
3223 else
3224 {
3225 /* If we just finished a step-over, then all threads had
3226 been momentarily paused. In all-stop, that's fine,
3227 we want threads stopped by now anyway. In non-stop,
3228 we need to re-resume threads that GDB wanted to be
3229 running. */
3230 unstop_all_lwps (1, event_child);
3231 }
3232 }
c03e6ccc 3233
fa593d66 3234 /* Stabilize threads (move out of jump pads). */
582511be
PA
3235 if (!non_stop)
3236 stabilize_threads ();
6bf5e0ba
PA
3237 }
3238 else
3239 {
3240 /* If we just finished a step-over, then all threads had been
3241 momentarily paused. In all-stop, that's fine, we want
3242 threads stopped by now anyway. In non-stop, we need to
3243 re-resume threads that GDB wanted to be running. */
3244 if (step_over_finished)
7984d532 3245 unstop_all_lwps (1, event_child);
6bf5e0ba
PA
3246 }
3247
de0d863e
DB
3248 if (extended_event_reported (&event_child->waitstatus))
3249 {
3250 /* If the reported event is a fork, vfork or exec, let GDB know. */
3251 ourstatus->kind = event_child->waitstatus.kind;
3252 ourstatus->value = event_child->waitstatus.value;
3253
3254 /* Clear the event lwp's waitstatus since we handled it already. */
3255 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3256 }
3257 else
3258 ourstatus->kind = TARGET_WAITKIND_STOPPED;
5b1c542e 3259
582511be 3260 /* Now that we've selected our final event LWP, un-adjust its PC if
3e572f71
PA
3261 it was a software breakpoint, and the client doesn't know we can
3262 adjust the breakpoint ourselves. */
3263 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3264 && !swbreak_feature)
582511be
PA
3265 {
3266 int decr_pc = the_low_target.decr_pc_after_break;
3267
3268 if (decr_pc != 0)
3269 {
3270 struct regcache *regcache
3271 = get_thread_regcache (current_thread, 1);
3272 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3273 }
3274 }
3275
0bfdf32f 3276 if (current_thread->last_resume_kind == resume_stop
8336d594 3277 && WSTOPSIG (w) == SIGSTOP)
bd99dc85
PA
3278 {
3279 /* A thread that has been requested to stop by GDB with vCont;t,
3280 and it stopped cleanly, so report as SIG0. The use of
3281 SIGSTOP is an implementation detail. */
a493e3e2 3282 ourstatus->value.sig = GDB_SIGNAL_0;
bd99dc85 3283 }
0bfdf32f 3284 else if (current_thread->last_resume_kind == resume_stop
8336d594 3285 && WSTOPSIG (w) != SIGSTOP)
bd99dc85
PA
3286 {
3287 /* A thread that has been requested to stop by GDB with vCont;t,
d50171e4 3288 but, it stopped for other reasons. */
2ea28649 3289 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85 3290 }
de0d863e 3291 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
bd99dc85 3292 {
2ea28649 3293 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
bd99dc85
PA
3294 }
3295
d50171e4
PA
3296 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3297
bd99dc85 3298 if (debug_threads)
87ce2a04
DE
3299 {
3300 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
0bfdf32f 3301 target_pid_to_str (ptid_of (current_thread)),
87ce2a04
DE
3302 ourstatus->kind, ourstatus->value.sig);
3303 debug_exit ();
3304 }
bd99dc85 3305
0bfdf32f 3306 return ptid_of (current_thread);
bd99dc85
PA
3307}
3308
3309/* Get rid of any pending event in the pipe. */
3310static void
3311async_file_flush (void)
3312{
3313 int ret;
3314 char buf;
3315
3316 do
3317 ret = read (linux_event_pipe[0], &buf, 1);
3318 while (ret >= 0 || (ret == -1 && errno == EINTR));
3319}
3320
3321/* Put something in the pipe, so the event loop wakes up. */
3322static void
3323async_file_mark (void)
3324{
3325 int ret;
3326
3327 async_file_flush ();
3328
3329 do
3330 ret = write (linux_event_pipe[1], "+", 1);
3331 while (ret == 0 || (ret == -1 && errno == EINTR));
3332
3333 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3334 be awakened anyway. */
3335}
3336
95954743
PA
3337static ptid_t
3338linux_wait (ptid_t ptid,
3339 struct target_waitstatus *ourstatus, int target_options)
bd99dc85 3340{
95954743 3341 ptid_t event_ptid;
bd99dc85 3342
bd99dc85
PA
3343 /* Flush the async file first. */
3344 if (target_is_async_p ())
3345 async_file_flush ();
3346
582511be
PA
3347 do
3348 {
3349 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3350 }
3351 while ((target_options & TARGET_WNOHANG) == 0
3352 && ptid_equal (event_ptid, null_ptid)
3353 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
bd99dc85
PA
3354
3355 /* If at least one stop was reported, there may be more. A single
3356 SIGCHLD can signal more than one child stop. */
3357 if (target_is_async_p ()
3358 && (target_options & TARGET_WNOHANG) != 0
95954743 3359 && !ptid_equal (event_ptid, null_ptid))
bd99dc85
PA
3360 async_file_mark ();
3361
3362 return event_ptid;
da6d8c04
DJ
3363}
3364
/* Deliver signal SIGNO to the LWP with id LWPID.

   Prefer the tkill syscall, which targets one specific thread — with
   NPTL threads, plain kill(2) would hit the whole thread group.  If
   the running kernel does not implement tkill (ENOSYS), remember that
   in a static flag and use kill(2) from then on.  */
static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int result;

	errno = 0;
	result = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return result;

	/* Kernel predates tkill; don't try it again.  */
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
3392
/* Exported entry point to request that LWP stop; simply queues a
   SIGSTOP for it (a no-op if one is already pending).  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3398
0d62e5e8 3399static void
02fc4de7 3400send_sigstop (struct lwp_info *lwp)
0d62e5e8 3401{
bd99dc85 3402 int pid;
0d62e5e8 3403
d86d4aaf 3404 pid = lwpid_of (get_lwp_thread (lwp));
bd99dc85 3405
0d62e5e8
DJ
3406 /* If we already have a pending stop signal for this process, don't
3407 send another. */
54a0b537 3408 if (lwp->stop_expected)
0d62e5e8 3409 {
ae13219e 3410 if (debug_threads)
87ce2a04 3411 debug_printf ("Have pending sigstop for lwp %d\n", pid);
ae13219e 3412
0d62e5e8
DJ
3413 return;
3414 }
3415
3416 if (debug_threads)
87ce2a04 3417 debug_printf ("Sending sigstop to lwp %d\n", pid);
0d62e5e8 3418
d50171e4 3419 lwp->stop_expected = 1;
bd99dc85 3420 kill_lwp (pid, SIGSTOP);
0d62e5e8
DJ
3421}
3422
7984d532
PA
3423static int
3424send_sigstop_callback (struct inferior_list_entry *entry, void *except)
02fc4de7 3425{
d86d4aaf
DE
3426 struct thread_info *thread = (struct thread_info *) entry;
3427 struct lwp_info *lwp = get_thread_lwp (thread);
02fc4de7 3428
7984d532
PA
3429 /* Ignore EXCEPT. */
3430 if (lwp == except)
3431 return 0;
3432
02fc4de7 3433 if (lwp->stopped)
7984d532 3434 return 0;
02fc4de7
PA
3435
3436 send_sigstop (lwp);
7984d532
PA
3437 return 0;
3438}
3439
3440/* Increment the suspend count of an LWP, and stop it, if not stopped
3441 yet. */
3442static int
3443suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3444 void *except)
3445{
d86d4aaf
DE
3446 struct thread_info *thread = (struct thread_info *) entry;
3447 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
3448
3449 /* Ignore EXCEPT. */
3450 if (lwp == except)
3451 return 0;
3452
3453 lwp->suspended++;
3454
3455 return send_sigstop_callback (entry, except);
02fc4de7
PA
3456}
3457
95954743
PA
3458static void
3459mark_lwp_dead (struct lwp_info *lwp, int wstat)
3460{
3461 /* It's dead, really. */
3462 lwp->dead = 1;
3463
3464 /* Store the exit status for later. */
3465 lwp->status_pending_p = 1;
3466 lwp->status_pending = wstat;
3467
95954743
PA
3468 /* Prevent trying to stop it. */
3469 lwp->stopped = 1;
3470
3471 /* No further stops are expected from a dead lwp. */
3472 lwp->stop_expected = 0;
3473}
3474
fa96cb38
PA
3475/* Wait for all children to stop for the SIGSTOPs we just queued. */
3476
0d62e5e8 3477static void
fa96cb38 3478wait_for_sigstop (void)
0d62e5e8 3479{
0bfdf32f 3480 struct thread_info *saved_thread;
95954743 3481 ptid_t saved_tid;
fa96cb38
PA
3482 int wstat;
3483 int ret;
0d62e5e8 3484
0bfdf32f
GB
3485 saved_thread = current_thread;
3486 if (saved_thread != NULL)
3487 saved_tid = saved_thread->entry.id;
bd99dc85 3488 else
95954743 3489 saved_tid = null_ptid; /* avoid bogus unused warning */
bd99dc85 3490
d50171e4 3491 if (debug_threads)
fa96cb38 3492 debug_printf ("wait_for_sigstop: pulling events\n");
d50171e4 3493
fa96cb38
PA
3494 /* Passing NULL_PTID as filter indicates we want all events to be
3495 left pending. Eventually this returns when there are no
3496 unwaited-for children left. */
3497 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3498 &wstat, __WALL);
3499 gdb_assert (ret == -1);
0d62e5e8 3500
0bfdf32f
GB
3501 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3502 current_thread = saved_thread;
0d62e5e8
DJ
3503 else
3504 {
3505 if (debug_threads)
87ce2a04 3506 debug_printf ("Previously current thread died.\n");
0d62e5e8 3507
bd99dc85
PA
3508 if (non_stop)
3509 {
3510 /* We can't change the current inferior behind GDB's back,
3511 otherwise, a subsequent command may apply to the wrong
3512 process. */
0bfdf32f 3513 current_thread = NULL;
bd99dc85
PA
3514 }
3515 else
3516 {
3517 /* Set a valid thread as current. */
0bfdf32f 3518 set_desired_thread (0);
bd99dc85 3519 }
0d62e5e8
DJ
3520 }
3521}
3522
fa593d66
PA
3523/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3524 move it out, because we need to report the stop event to GDB. For
3525 example, if the user puts a breakpoint in the jump pad, it's
3526 because she wants to debug it. */
3527
3528static int
3529stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3530{
d86d4aaf
DE
3531 struct thread_info *thread = (struct thread_info *) entry;
3532 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3533
3534 gdb_assert (lwp->suspended == 0);
3535 gdb_assert (lwp->stopped);
3536
3537 /* Allow debugging the jump pad, gdb_collect, etc.. */
3538 return (supports_fast_tracepoints ()
58b4daa5 3539 && agent_loaded_p ()
fa593d66 3540 && (gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3541 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3542 || thread->last_resume_kind == resume_step)
3543 && linux_fast_tracepoint_collecting (lwp, NULL));
3544}
3545
3546static void
3547move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3548{
d86d4aaf
DE
3549 struct thread_info *thread = (struct thread_info *) entry;
3550 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3551 int *wstat;
3552
3553 gdb_assert (lwp->suspended == 0);
3554 gdb_assert (lwp->stopped);
3555
3556 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3557
3558 /* Allow debugging the jump pad, gdb_collect, etc. */
3559 if (!gdb_breakpoint_here (lwp->stop_pc)
15c66dd6 3560 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
fa593d66
PA
3561 && thread->last_resume_kind != resume_step
3562 && maybe_move_out_of_jump_pad (lwp, wstat))
3563 {
3564 if (debug_threads)
87ce2a04 3565 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
d86d4aaf 3566 lwpid_of (thread));
fa593d66
PA
3567
3568 if (wstat)
3569 {
3570 lwp->status_pending_p = 0;
3571 enqueue_one_deferred_signal (lwp, wstat);
3572
3573 if (debug_threads)
87ce2a04
DE
3574 debug_printf ("Signal %d for LWP %ld deferred "
3575 "(in jump pad)\n",
d86d4aaf 3576 WSTOPSIG (*wstat), lwpid_of (thread));
fa593d66
PA
3577 }
3578
3579 linux_resume_one_lwp (lwp, 0, 0, NULL);
3580 }
3581 else
3582 lwp->suspended++;
3583}
3584
3585static int
3586lwp_running (struct inferior_list_entry *entry, void *data)
3587{
d86d4aaf
DE
3588 struct thread_info *thread = (struct thread_info *) entry;
3589 struct lwp_info *lwp = get_thread_lwp (thread);
fa593d66
PA
3590
3591 if (lwp->dead)
3592 return 0;
3593 if (lwp->stopped)
3594 return 0;
3595 return 1;
3596}
3597
7984d532
PA
3598/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3599 If SUSPEND, then also increase the suspend count of every LWP,
3600 except EXCEPT. */
3601
0d62e5e8 3602static void
7984d532 3603stop_all_lwps (int suspend, struct lwp_info *except)
0d62e5e8 3604{
bde24c0a
PA
3605 /* Should not be called recursively. */
3606 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3607
87ce2a04
DE
3608 if (debug_threads)
3609 {
3610 debug_enter ();
3611 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3612 suspend ? "stop-and-suspend" : "stop",
3613 except != NULL
d86d4aaf 3614 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
87ce2a04
DE
3615 : "none");
3616 }
3617
bde24c0a
PA
3618 stopping_threads = (suspend
3619 ? STOPPING_AND_SUSPENDING_THREADS
3620 : STOPPING_THREADS);
7984d532
PA
3621
3622 if (suspend)
d86d4aaf 3623 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
7984d532 3624 else
d86d4aaf 3625 find_inferior (&all_threads, send_sigstop_callback, except);
fa96cb38 3626 wait_for_sigstop ();
bde24c0a 3627 stopping_threads = NOT_STOPPING_THREADS;
87ce2a04
DE
3628
3629 if (debug_threads)
3630 {
3631 debug_printf ("stop_all_lwps done, setting stopping_threads "
3632 "back to !stopping\n");
3633 debug_exit ();
3634 }
0d62e5e8
DJ
3635}
3636
23f238d3
PA
3637/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3638 SIGNAL is nonzero, give it that signal. */
da6d8c04 3639
ce3a066d 3640static void
23f238d3
PA
3641linux_resume_one_lwp_throw (struct lwp_info *lwp,
3642 int step, int signal, siginfo_t *info)
da6d8c04 3643{
d86d4aaf 3644 struct thread_info *thread = get_lwp_thread (lwp);
0bfdf32f 3645 struct thread_info *saved_thread;
fa593d66 3646 int fast_tp_collecting;
0d62e5e8 3647
54a0b537 3648 if (lwp->stopped == 0)
0d62e5e8
DJ
3649 return;
3650
fa593d66
PA
3651 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3652
3653 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3654
219f2f23
PA
3655 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3656 user used the "jump" command, or "set $pc = foo"). */
3657 if (lwp->stop_pc != get_pc (lwp))
3658 {
3659 /* Collecting 'while-stepping' actions doesn't make sense
3660 anymore. */
d86d4aaf 3661 release_while_stepping_state_list (thread);
219f2f23
PA
3662 }
3663
0d62e5e8
DJ
3664 /* If we have pending signals or status, and a new signal, enqueue the
3665 signal. Also enqueue the signal if we are waiting to reinsert a
3666 breakpoint; it will be picked up again below. */
3667 if (signal != 0
fa593d66
PA
3668 && (lwp->status_pending_p
3669 || lwp->pending_signals != NULL
3670 || lwp->bp_reinsert != 0
3671 || fast_tp_collecting))
0d62e5e8
DJ
3672 {
3673 struct pending_signals *p_sig;
bca929d3 3674 p_sig = xmalloc (sizeof (*p_sig));
54a0b537 3675 p_sig->prev = lwp->pending_signals;
0d62e5e8 3676 p_sig->signal = signal;
32ca6d61
DJ
3677 if (info == NULL)
3678 memset (&p_sig->info, 0, sizeof (siginfo_t));
3679 else
3680 memcpy (&p_sig->info, info, sizeof (siginfo_t));
54a0b537 3681 lwp->pending_signals = p_sig;
0d62e5e8
DJ
3682 }
3683
d50171e4
PA
3684 if (lwp->status_pending_p)
3685 {
3686 if (debug_threads)
87ce2a04
DE
3687 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3688 " has pending status\n",
d86d4aaf 3689 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3690 lwp->stop_expected ? "expected" : "not expected");
d50171e4
PA
3691 return;
3692 }
0d62e5e8 3693
0bfdf32f
GB
3694 saved_thread = current_thread;
3695 current_thread = thread;
0d62e5e8
DJ
3696
3697 if (debug_threads)
87ce2a04 3698 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
d86d4aaf 3699 lwpid_of (thread), step ? "step" : "continue", signal,
87ce2a04 3700 lwp->stop_expected ? "expected" : "not expected");
0d62e5e8
DJ
3701
3702 /* This bit needs some thinking about. If we get a signal that
3703 we must report while a single-step reinsert is still pending,
3704 we often end up resuming the thread. It might be better to
3705 (ew) allow a stack of pending events; then we could be sure that
3706 the reinsert happened right away and not lose any signals.
3707
3708 Making this stack would also shrink the window in which breakpoints are
54a0b537 3709 uninserted (see comment in linux_wait_for_lwp) but not enough for
0d62e5e8
DJ
3710 complete correctness, so it won't solve that problem. It may be
3711 worthwhile just to solve this one, however. */
54a0b537 3712 if (lwp->bp_reinsert != 0)
0d62e5e8
DJ
3713 {
3714 if (debug_threads)
87ce2a04
DE
3715 debug_printf (" pending reinsert at 0x%s\n",
3716 paddress (lwp->bp_reinsert));
d50171e4 3717
85e00e85 3718 if (can_hardware_single_step ())
d50171e4 3719 {
fa593d66
PA
3720 if (fast_tp_collecting == 0)
3721 {
3722 if (step == 0)
3723 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3724 if (lwp->suspended)
3725 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3726 lwp->suspended);
3727 }
d50171e4
PA
3728
3729 step = 1;
3730 }
0d62e5e8
DJ
3731
3732 /* Postpone any pending signal. It was enqueued above. */
3733 signal = 0;
3734 }
3735
fa593d66
PA
3736 if (fast_tp_collecting == 1)
3737 {
3738 if (debug_threads)
87ce2a04
DE
3739 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3740 " (exit-jump-pad-bkpt)\n",
d86d4aaf 3741 lwpid_of (thread));
fa593d66
PA
3742
3743 /* Postpone any pending signal. It was enqueued above. */
3744 signal = 0;
3745 }
3746 else if (fast_tp_collecting == 2)
3747 {
3748 if (debug_threads)
87ce2a04
DE
3749 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3750 " single-stepping\n",
d86d4aaf 3751 lwpid_of (thread));
fa593d66
PA
3752
3753 if (can_hardware_single_step ())
3754 step = 1;
3755 else
38e08fca
GB
3756 {
3757 internal_error (__FILE__, __LINE__,
3758 "moving out of jump pad single-stepping"
3759 " not implemented on this target");
3760 }
fa593d66
PA
3761
3762 /* Postpone any pending signal. It was enqueued above. */
3763 signal = 0;
3764 }
3765
219f2f23
PA
3766 /* If we have while-stepping actions in this thread set it stepping.
3767 If we have a signal to deliver, it may or may not be set to
3768 SIG_IGN, we don't know. Assume so, and allow collecting
3769 while-stepping into a signal handler. A possible smart thing to
3770 do would be to set an internal breakpoint at the signal return
3771 address, continue, and carry on catching this while-stepping
3772 action only when that breakpoint is hit. A future
3773 enhancement. */
d86d4aaf 3774 if (thread->while_stepping != NULL
219f2f23
PA
3775 && can_hardware_single_step ())
3776 {
3777 if (debug_threads)
87ce2a04 3778 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
d86d4aaf 3779 lwpid_of (thread));
219f2f23
PA
3780 step = 1;
3781 }
3782
582511be 3783 if (the_low_target.get_pc != NULL)
0d62e5e8 3784 {
0bfdf32f 3785 struct regcache *regcache = get_thread_regcache (current_thread, 1);
582511be
PA
3786
3787 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3788
3789 if (debug_threads)
3790 {
3791 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3792 (long) lwp->stop_pc);
3793 }
0d62e5e8
DJ
3794 }
3795
fa593d66
PA
3796 /* If we have pending signals, consume one unless we are trying to
3797 reinsert a breakpoint or we're trying to finish a fast tracepoint
3798 collect. */
3799 if (lwp->pending_signals != NULL
3800 && lwp->bp_reinsert == 0
3801 && fast_tp_collecting == 0)
0d62e5e8
DJ
3802 {
3803 struct pending_signals **p_sig;
3804
54a0b537 3805 p_sig = &lwp->pending_signals;
0d62e5e8
DJ
3806 while ((*p_sig)->prev != NULL)
3807 p_sig = &(*p_sig)->prev;
3808
3809 signal = (*p_sig)->signal;
32ca6d61 3810 if ((*p_sig)->info.si_signo != 0)
d86d4aaf 3811 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
56f7af9c 3812 &(*p_sig)->info);
32ca6d61 3813
0d62e5e8
DJ
3814 free (*p_sig);
3815 *p_sig = NULL;
3816 }
3817
aa5ca48f
DE
3818 if (the_low_target.prepare_to_resume != NULL)
3819 the_low_target.prepare_to_resume (lwp);
3820
d86d4aaf 3821 regcache_invalidate_thread (thread);
da6d8c04 3822 errno = 0;
54a0b537 3823 lwp->stepping = step;
d86d4aaf 3824 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
b8e1b30e 3825 (PTRACE_TYPE_ARG3) 0,
14ce3065
DE
3826 /* Coerce to a uintptr_t first to avoid potential gcc warning
3827 of coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 3828 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
0d62e5e8 3829
0bfdf32f 3830 current_thread = saved_thread;
da6d8c04 3831 if (errno)
23f238d3
PA
3832 perror_with_name ("resuming thread");
3833
3834 /* Successfully resumed. Clear state that no longer makes sense,
3835 and mark the LWP as running. Must not do this before resuming
3836 otherwise if that fails other code will be confused. E.g., we'd
3837 later try to stop the LWP and hang forever waiting for a stop
3838 status. Note that we must not throw after this is cleared,
3839 otherwise handle_zombie_lwp_error would get confused. */
3840 lwp->stopped = 0;
3841 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3842}
3843
3844/* Called when we try to resume a stopped LWP and that errors out. If
3845 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3846 or about to become), discard the error, clear any pending status
3847 the LWP may have, and return true (we'll collect the exit status
3848 soon enough). Otherwise, return false. */
3849
3850static int
3851check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3852{
3853 struct thread_info *thread = get_lwp_thread (lp);
3854
3855 /* If we get an error after resuming the LWP successfully, we'd
3856 confuse !T state for the LWP being gone. */
3857 gdb_assert (lp->stopped);
3858
3859 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3860 because even if ptrace failed with ESRCH, the tracee may be "not
3861 yet fully dead", but already refusing ptrace requests. In that
3862 case the tracee has 'R (Running)' state for a little bit
3863 (observed in Linux 3.18). See also the note on ESRCH in the
3864 ptrace(2) man page. Instead, check whether the LWP has any state
3865 other than ptrace-stopped. */
3866
3867 /* Don't assume anything if /proc/PID/status can't be read. */
3868 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3221518c 3869 {
23f238d3
PA
3870 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3871 lp->status_pending_p = 0;
3872 return 1;
3873 }
3874 return 0;
3875}
3876
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.

   LWP is the thread to resume; STEP non-zero requests a single-step;
   SIGNAL is the signal number to deliver (0 for none); INFO is the
   optional siginfo to attach to the signal.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  TRY
    {
      linux_resume_one_lwp_throw (lwp, step, signal, info);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Swallow the error only when the LWP is verifiably no longer
	 ptrace-stopped (zombie or nearly so); otherwise re-raise.  */
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH
}
3895
/* Bundles a client-supplied array of resume requests with its element
   count, so both can travel through a find_inferior callback's single
   void * argument (see linux_set_resume_request).  */

struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of elements in RESUME.  */
};
64386c31 3901
ebcf782c
DE
3902/* This function is called once per thread via find_inferior.
3903 ARG is a pointer to a thread_resume_array struct.
3904 We look up the thread specified by ENTRY in ARG, and mark the thread
3905 with a pointer to the appropriate resume request.
5544ad89
DJ
3906
3907 This algorithm is O(threads * resume elements), but resume elements
3908 is small (and will remain small at least until GDB supports thread
3909 suspension). */
ebcf782c 3910
2bd7c093
PA
3911static int
3912linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
0d62e5e8 3913{
d86d4aaf
DE
3914 struct thread_info *thread = (struct thread_info *) entry;
3915 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3916 int ndx;
2bd7c093 3917 struct thread_resume_array *r;
64386c31 3918
2bd7c093 3919 r = arg;
64386c31 3920
2bd7c093 3921 for (ndx = 0; ndx < r->n; ndx++)
95954743
PA
3922 {
3923 ptid_t ptid = r->resume[ndx].thread;
3924 if (ptid_equal (ptid, minus_one_ptid)
3925 || ptid_equal (ptid, entry->id)
0c9070b3
YQ
3926 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3927 of PID'. */
d86d4aaf 3928 || (ptid_get_pid (ptid) == pid_of (thread)
0c9070b3
YQ
3929 && (ptid_is_pid (ptid)
3930 || ptid_get_lwp (ptid) == -1)))
95954743 3931 {
d50171e4 3932 if (r->resume[ndx].kind == resume_stop
8336d594 3933 && thread->last_resume_kind == resume_stop)
d50171e4
PA
3934 {
3935 if (debug_threads)
87ce2a04
DE
3936 debug_printf ("already %s LWP %ld at GDB's request\n",
3937 (thread->last_status.kind
3938 == TARGET_WAITKIND_STOPPED)
3939 ? "stopped"
3940 : "stopping",
d86d4aaf 3941 lwpid_of (thread));
d50171e4
PA
3942
3943 continue;
3944 }
3945
95954743 3946 lwp->resume = &r->resume[ndx];
8336d594 3947 thread->last_resume_kind = lwp->resume->kind;
fa593d66 3948
c2d6af84
PA
3949 lwp->step_range_start = lwp->resume->step_range_start;
3950 lwp->step_range_end = lwp->resume->step_range_end;
3951
fa593d66
PA
3952 /* If we had a deferred signal to report, dequeue one now.
3953 This can happen if LWP gets more than one signal while
3954 trying to get out of a jump pad. */
3955 if (lwp->stopped
3956 && !lwp->status_pending_p
3957 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3958 {
3959 lwp->status_pending_p = 1;
3960
3961 if (debug_threads)
87ce2a04
DE
3962 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3963 "leaving status pending.\n",
d86d4aaf
DE
3964 WSTOPSIG (lwp->status_pending),
3965 lwpid_of (thread));
fa593d66
PA
3966 }
3967
95954743
PA
3968 return 0;
3969 }
3970 }
2bd7c093
PA
3971
3972 /* No resume action for this thread. */
3973 lwp->resume = NULL;
64386c31 3974
2bd7c093 3975 return 0;
5544ad89
DJ
3976}
3977
20ad9378
DE
3978/* find_inferior callback for linux_resume.
3979 Set *FLAG_P if this lwp has an interesting status pending. */
5544ad89 3980
bd99dc85
PA
3981static int
3982resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
5544ad89 3983{
d86d4aaf
DE
3984 struct thread_info *thread = (struct thread_info *) entry;
3985 struct lwp_info *lwp = get_thread_lwp (thread);
5544ad89 3986
bd99dc85
PA
3987 /* LWPs which will not be resumed are not interesting, because
3988 we might not wait for them next time through linux_wait. */
2bd7c093 3989 if (lwp->resume == NULL)
bd99dc85 3990 return 0;
64386c31 3991
582511be 3992 if (thread_still_has_status_pending_p (thread))
d50171e4
PA
3993 * (int *) flag_p = 1;
3994
3995 return 0;
3996}
3997
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  /* The client asked this thread to stay stopped; nothing to step.  */
  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  /* Suspended LWPs are paused on purpose (e.g. for another thread's
     step-over); leave them alone.  */
  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return 0;
    }

  /* NOTE(review): there is deliberately(?) no early return here —
     even with need_step_over clear, control falls through to the
     breakpoint-at-PC check below.  Confirm against upstream before
     changing.  */
  if (!lwp->need_step_over)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
    }

  /* A pending status will be reported instead of resuming, so no
     step-over is needed now.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  /* breakpoint_here/gdb_breakpoint_here consult the breakpoint list
     of the current thread, so temporarily switch to this one.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's side
	 and it evaluate to false, step over this breakpoint as well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_thread = saved_thread;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return 0;
}
4124
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to remove the breakpoint out
   of the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped while the breakpoint is temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  /* Pause everything; LWP itself (the exception) must end up
     unsuspended.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* Breakpoint bookkeeping below operates on the current thread.  */
  saved_thread = current_thread;
  current_thread = thread;

  /* Record where to reinsert, then pull the breakpoint/jump out of
     the inferior so the step can make progress.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step: plant a reinsert breakpoint at the
	 address the low target suggests and continue instead.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_thread = saved_thread;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->entry.id;
  return 1;
}
4193
4194/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4195 start_step_over, if still there, and delete any reinsert
4196 breakpoints we've set, on non hardware single-step targets. */
4197
4198static int
4199finish_step_over (struct lwp_info *lwp)
4200{
4201 if (lwp->bp_reinsert != 0)
4202 {
4203 if (debug_threads)
87ce2a04 4204 debug_printf ("Finished step over.\n");
d50171e4
PA
4205
4206 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4207 may be no breakpoint to reinsert there by now. */
4208 reinsert_breakpoints_at (lwp->bp_reinsert);
fa593d66 4209 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
d50171e4
PA
4210
4211 lwp->bp_reinsert = 0;
4212
4213 /* Delete any software-single-step reinsert breakpoints. No
4214 longer needed. We don't have to worry about other threads
4215 hitting this trap, and later not being able to explain it,
4216 because we were stepping over a breakpoint, and we hold all
4217 threads but LWP stopped while doing that. */
4218 if (!can_hardware_single_step ())
4219 delete_reinsert_breakpoints ();
4220
4221 step_over_bkpt = null_ptid;
4222 return 1;
4223 }
4224 else
4225 return 0;
4226}
4227
/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;
  /* ARG points at the leave_all_stopped flag computed by
     linux_resume.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		    &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
4349
/* Resume execution according to RESUME_INFO (N elements), honoring
   any pending statuses and pending step-overs first.  Target op
   entry point for vCont-style resumption.  */

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct thread_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  /* First, mark each thread with the resume request that applies to
     it (or NULL if none).  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_threads, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct thread_info *) find_inferior (&all_threads,
					      need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }
}
4413
/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int step;

  /* EXCEPT (may be NULL) is the one LWP the caller handles itself.  */
  if (lwp == except)
    return 0;

  if (debug_threads)
    debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
      return 0;
    }

  /* Already reported stopped to the client; leave it that way.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	debug_printf ("   client wants LWP to remain %ld stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
		      lwpid_of (thread));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
4495
4496static int
4497unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4498{
d86d4aaf
DE
4499 struct thread_info *thread = (struct thread_info *) entry;
4500 struct lwp_info *lwp = get_thread_lwp (thread);
7984d532
PA
4501
4502 if (lwp == except)
4503 return 0;
4504
4505 lwp->suspended--;
4506 gdb_assert (lwp->suspended >= 0);
4507
4508 return proceed_one_lwp (entry, except);
d50171e4
PA
4509}
4510
4511/* When we finish a step-over, set threads running again. If there's
4512 another thread that may need a step-over, now's the time to start
4513 it. Eventually, we'll move all threads past their breakpoints. */
4514
4515static void
4516proceed_all_lwps (void)
4517{
d86d4aaf 4518 struct thread_info *need_step_over;
d50171e4
PA
4519
4520 /* If there is a thread which would otherwise be resumed, which is
4521 stopped at a breakpoint that needs stepping over, then don't
4522 resume any threads - have it step over the breakpoint with all
4523 other threads stopped, then resume all threads again. */
4524
4525 if (supports_breakpoints ())
4526 {
4527 need_step_over
d86d4aaf
DE
4528 = (struct thread_info *) find_inferior (&all_threads,
4529 need_step_over_p, NULL);
d50171e4
PA
4530
4531 if (need_step_over != NULL)
4532 {
4533 if (debug_threads)
87ce2a04
DE
4534 debug_printf ("proceed_all_lwps: found "
4535 "thread %ld needing a step-over\n",
4536 lwpid_of (need_step_over));
d50171e4 4537
d86d4aaf 4538 start_step_over (get_thread_lwp (need_step_over));
d50171e4
PA
4539 return;
4540 }
4541 }
5544ad89 4542
d50171e4 4543 if (debug_threads)
87ce2a04 4544 debug_printf ("Proceeding, no step-over needed\n");
d50171e4 4545
d86d4aaf 4546 find_inferior (&all_threads, proceed_one_lwp, NULL);
d50171e4
PA
4547}
4548
4549/* Stopped LWPs that the client wanted to be running, that don't have
4550 pending statuses, are set to run again, except for EXCEPT, if not
4551 NULL. This undoes a stop_all_lwps call. */
4552
4553static void
7984d532 4554unstop_all_lwps (int unsuspend, struct lwp_info *except)
d50171e4 4555{
5544ad89
DJ
4556 if (debug_threads)
4557 {
87ce2a04 4558 debug_enter ();
d50171e4 4559 if (except)
87ce2a04 4560 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
d86d4aaf 4561 lwpid_of (get_lwp_thread (except)));
5544ad89 4562 else
87ce2a04 4563 debug_printf ("unstopping all lwps\n");
5544ad89
DJ
4564 }
4565
7984d532 4566 if (unsuspend)
d86d4aaf 4567 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
7984d532 4568 else
d86d4aaf 4569 find_inferior (&all_threads, proceed_one_lwp, except);
87ce2a04
DE
4570
4571 if (debug_threads)
4572 {
4573 debug_printf ("unstop_all_lwps done\n");
4574 debug_exit ();
4575 }
0d62e5e8
DJ
4576}
4577
58caa3dc
DJ
4578
4579#ifdef HAVE_LINUX_REGSETS
4580
1faeff08
MR
4581#define use_linux_regsets 1
4582
030031ee
PA
4583/* Returns true if REGSET has been disabled. */
4584
4585static int
4586regset_disabled (struct regsets_info *info, struct regset_info *regset)
4587{
4588 return (info->disabled_regsets != NULL
4589 && info->disabled_regsets[regset - info->regsets]);
4590}
4591
4592/* Disable REGSET. */
4593
4594static void
4595disable_regset (struct regsets_info *info, struct regset_info *regset)
4596{
4597 int dr_offset;
4598
4599 dr_offset = regset - info->regsets;
4600 if (info->disabled_regsets == NULL)
4601 info->disabled_regsets = xcalloc (1, info->num_regsets);
4602 info->disabled_regsets[dr_offset] = 1;
4603}
4604
/* Fetch the registers covered by every regset in REGSETS_INFO from
   the current LWP into REGCACHE.  Returns 0 if a GENERAL_REGS regset
   was successfully read, 1 otherwise (telling the caller it still has
   to fetch the general registers some other way).  */

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty regsets and ones already found unusable for this
	 process mode.  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      /* NT-typed requests (PTRACE_GETREGSET style) take an iovec
	 describing the buffer; older requests take the buffer
	 directly.  */
      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      /* SPARC ptrace takes the data pointer and type in the opposite
	 order.  */
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else
	    {
	      char s[256];

	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  /* Copy the fetched bytes into the regcache.  */
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4676
/* Write the registers covered by every regset in REGSETS_INFO from
   REGCACHE back to the current LWP.  Returns 0 if a GENERAL_REGS
   regset was successfully written (or the process vanished), 1
   otherwise.  */

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      /* Skip empty and disabled regsets, and read-only ones (no
	 fill_function).  */
      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO)
	    {
	      /* If we get EIO on a regset, do not try it again for
		 this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
4764
1faeff08 4765#else /* !HAVE_LINUX_REGSETS */
58caa3dc 4766
1faeff08 4767#define use_linux_regsets 0
3aee8918
PA
4768#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4769#define regsets_store_inferior_registers(regsets_info, regcache) 1
58caa3dc 4770
58caa3dc 4771#endif
1faeff08
MR
4772
4773/* Return 1 if register REGNO is supported by one of the regset ptrace
4774 calls or 0 if it has to be transferred individually. */
4775
4776static int
3aee8918 4777linux_register_in_regsets (const struct regs_info *regs_info, int regno)
1faeff08
MR
4778{
4779 unsigned char mask = 1 << (regno % 8);
4780 size_t index = regno / 8;
4781
4782 return (use_linux_regsets
3aee8918
PA
4783 && (regs_info->regset_bitmap == NULL
4784 || (regs_info->regset_bitmap[index] & mask) != 0));
1faeff08
MR
4785}
4786
58caa3dc 4787#ifdef HAVE_LINUX_USRREGS
1faeff08
MR
4788
4789int
3aee8918 4790register_addr (const struct usrregs_info *usrregs, int regnum)
1faeff08
MR
4791{
4792 int addr;
4793
3aee8918 4794 if (regnum < 0 || regnum >= usrregs->num_regs)
1faeff08
MR
4795 error ("Invalid register number %d.", regnum);
4796
3aee8918 4797 addr = usrregs->regmap[regnum];
1faeff08
MR
4798
4799 return addr;
4800}
4801
/* Fetch one register.  Reads register REGNO of the current LWP from
   its user area, one ptrace word at a time, and supplies it to
   REGCACHE.  Silently does nothing for registers that are out of
   range, unfetchable, or not present in the user area.  */
static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words,
     since PTRACE_PEEKUSER transfers word-sized chunks.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let the low target massage the raw bytes if it needs to.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
4845
/* Store one register.  Collects register REGNO from REGCACHE and
   writes it to the current LWP's user area, one ptrace word at a
   time.  Silently does nothing for registers that are out of range,
   unstorable, or not present in the user area.  */
static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  /* Round the register size up to a whole number of ptrace words,
     since PTRACE_POKEUSER transfers word-sized chunks.  */
  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  /* Zero the pad bytes beyond the register's real size.  */
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
4900
4901/* Fetch all registers, or just one, from the child process.
4902 If REGNO is -1, do this for all registers, skipping any that are
4903 assumed to have been retrieved by regsets_fetch_inferior_registers,
4904 unless ALL is non-zero.
4905 Otherwise, REGNO specifies which register (so we can save time). */
4906static void
3aee8918
PA
4907usr_fetch_inferior_registers (const struct regs_info *regs_info,
4908 struct regcache *regcache, int regno, int all)
1faeff08 4909{
3aee8918
PA
4910 struct usrregs_info *usr = regs_info->usrregs;
4911
1faeff08
MR
4912 if (regno == -1)
4913 {
3aee8918
PA
4914 for (regno = 0; regno < usr->num_regs; regno++)
4915 if (all || !linux_register_in_regsets (regs_info, regno))
4916 fetch_register (usr, regcache, regno);
1faeff08
MR
4917 }
4918 else
3aee8918 4919 fetch_register (usr, regcache, regno);
1faeff08
MR
4920}
4921
4922/* Store our register values back into the inferior.
4923 If REGNO is -1, do this for all registers, skipping any that are
4924 assumed to have been saved by regsets_store_inferior_registers,
4925 unless ALL is non-zero.
4926 Otherwise, REGNO specifies which register (so we can save time). */
4927static void
3aee8918
PA
4928usr_store_inferior_registers (const struct regs_info *regs_info,
4929 struct regcache *regcache, int regno, int all)
1faeff08 4930{
3aee8918
PA
4931 struct usrregs_info *usr = regs_info->usrregs;
4932
1faeff08
MR
4933 if (regno == -1)
4934 {
3aee8918
PA
4935 for (regno = 0; regno < usr->num_regs; regno++)
4936 if (all || !linux_register_in_regsets (regs_info, regno))
4937 store_register (usr, regcache, regno);
1faeff08
MR
4938 }
4939 else
3aee8918 4940 store_register (usr, regcache, regno);
1faeff08
MR
4941}
4942
4943#else /* !HAVE_LINUX_USRREGS */
4944
3aee8918
PA
4945#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4946#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
1faeff08 4947
58caa3dc 4948#endif
1faeff08
MR
4949
4950
/* Fetch all registers (REGNO == -1) or just REGNO from the current
   LWP into REGCACHE.  Regsets are tried first; anything they do not
   cover (or everything, if they fail) falls back to per-register
   PTRACE_PEEKUSER transfers.  */

void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* Let the target-specific fetch_register hook handle each
	 register first, if it has one.  */
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      /* ALL is 1 if the regsets did not cover the general registers,
	 in which case the usrregs fallback must fetch everything.  */
      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      /* A nonzero return from the hook means it fully handled the
	 register.  */
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
4983
/* Store all registers (REGNO == -1) or just REGNO from REGCACHE back
   into the current LWP.  Regsets are tried first; anything they do
   not cover (or everything, if they fail) falls back to per-register
   PTRACE_POKEUSER transfers.  */

void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      /* ALL is 1 if the regsets did not cover the general registers,
	 in which case the usrregs fallback must store everything.  */
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
5008
da6d8c04 5009
da6d8c04
DJ
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the errno of the failing ptrace word transfer.  Prefers a single
   read from /proc/PID/mem and falls back to word-by-word
   PTRACE_PEEKTEXT for whatever /proc could not supply.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  On a partial failure,
     I is the number of whole words successfully read, so we can still
     hand back whatever data we got before the error.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
5097
93ae6fdc
PA
5098/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5099 memory at MEMADDR. On failure (cannot write to the inferior)
f0ae6fc3 5100 returns the value of errno. Always succeeds if LEN is zero. */
da6d8c04 5101
ce3a066d 5102static int
f450004a 5103linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
da6d8c04
DJ
5104{
5105 register int i;
5106 /* Round starting address down to longword boundary. */
5107 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5108 /* Round ending address up; get number of longwords that makes. */
5109 register int count
493e2a69
MS
5110 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5111 / sizeof (PTRACE_XFER_TYPE);
5112
da6d8c04 5113 /* Allocate buffer of that many longwords. */
493e2a69
MS
5114 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5115 alloca (count * sizeof (PTRACE_XFER_TYPE));
5116
0bfdf32f 5117 int pid = lwpid_of (current_thread);
da6d8c04 5118
f0ae6fc3
PA
5119 if (len == 0)
5120 {
5121 /* Zero length write always succeeds. */
5122 return 0;
5123 }
5124
0d62e5e8
DJ
5125 if (debug_threads)
5126 {
58d6951d
DJ
5127 /* Dump up to four bytes. */
5128 unsigned int val = * (unsigned int *) myaddr;
5129 if (len == 1)
5130 val = val & 0xff;
5131 else if (len == 2)
5132 val = val & 0xffff;
5133 else if (len == 3)
5134 val = val & 0xffffff;
de0d863e
DB
5135 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5136 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
0d62e5e8
DJ
5137 }
5138
da6d8c04
DJ
5139 /* Fill start and end extra bytes of buffer with existing memory data. */
5140
93ae6fdc 5141 errno = 0;
14ce3065
DE
5142 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5143 about coercing an 8 byte integer to a 4 byte pointer. */
5144 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
b8e1b30e
LM
5145 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5146 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5147 if (errno)
5148 return errno;
da6d8c04
DJ
5149
5150 if (count > 1)
5151 {
93ae6fdc 5152 errno = 0;
da6d8c04 5153 buffer[count - 1]
95954743 5154 = ptrace (PTRACE_PEEKTEXT, pid,
14ce3065
DE
5155 /* Coerce to a uintptr_t first to avoid potential gcc warning
5156 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e 5157 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
14ce3065 5158 * sizeof (PTRACE_XFER_TYPE)),
b8e1b30e 5159 (PTRACE_TYPE_ARG4) 0);
93ae6fdc
PA
5160 if (errno)
5161 return errno;
da6d8c04
DJ
5162 }
5163
93ae6fdc 5164 /* Copy data to be written over corresponding part of buffer. */
da6d8c04 5165
493e2a69
MS
5166 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5167 myaddr, len);
da6d8c04
DJ
5168
5169 /* Write the entire buffer. */
5170
5171 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5172 {
5173 errno = 0;
14ce3065
DE
5174 ptrace (PTRACE_POKETEXT, pid,
5175 /* Coerce to a uintptr_t first to avoid potential gcc warning
5176 about coercing an 8 byte integer to a 4 byte pointer. */
b8e1b30e
LM
5177 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5178 (PTRACE_TYPE_ARG4) buffer[i]);
da6d8c04
DJ
5179 if (errno)
5180 return errno;
5181 }
5182
5183 return 0;
5184}
2f2893d9
DJ
5185
/* Called once symbols are available; initializes libthread_db-based
   thread discovery for the current process if not already active.
   A no-op when built without USE_THREAD_DB.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Already initialized for this process.  */
  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
5201
/* Interrupt the inferior, as if the user pressed ^C.  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  kill (-signal_pid, SIGINT);
}
5211
aa691b87
RM
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  Returns the number of bytes
   read, or -1 on failure to open or seek /proc/PID/auxv.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (current_thread);

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
5238
d993e290
PA
5239/* These breakpoint and watchpoint related wrapper functions simply
5240 pass on the function call if the target has registered a
5241 corresponding function. */
e013ee27
OF
5242
5243static int
802e8e6d
PA
5244linux_supports_z_point_type (char z_type)
5245{
5246 return (the_low_target.supports_z_point_type != NULL
5247 && the_low_target.supports_z_point_type (z_type));
5248}
5249
5250static int
5251linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5252 int size, struct raw_breakpoint *bp)
e013ee27 5253{
c8f4bfdd
YQ
5254 if (type == raw_bkpt_type_sw)
5255 return insert_memory_breakpoint (bp);
5256 else if (the_low_target.insert_point != NULL)
802e8e6d 5257 return the_low_target.insert_point (type, addr, size, bp);
e013ee27
OF
5258 else
5259 /* Unsupported (see target.h). */
5260 return 1;
5261}
5262
5263static int
802e8e6d
PA
5264linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5265 int size, struct raw_breakpoint *bp)
e013ee27 5266{
c8f4bfdd
YQ
5267 if (type == raw_bkpt_type_sw)
5268 return remove_memory_breakpoint (bp);
5269 else if (the_low_target.remove_point != NULL)
802e8e6d 5270 return the_low_target.remove_point (type, addr, size, bp);
e013ee27
OF
5271 else
5272 /* Unsupported (see target.h). */
5273 return 1;
5274}
5275
3e572f71
PA
5276/* Implement the to_stopped_by_sw_breakpoint target_ops
5277 method. */
5278
5279static int
5280linux_stopped_by_sw_breakpoint (void)
5281{
5282 struct lwp_info *lwp = get_thread_lwp (current_thread);
5283
5284 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5285}
5286
/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
   method.  */

static int
linux_supports_stopped_by_sw_breakpoint (void)
{
  /* USE_SIGTRAP_SIGINFO is a compile-time capability flag —
     NOTE(review): presumably nonzero when SIGTRAP siginfo can
     classify the stop reason; confirm against nat/linux-ptrace.h.  */
  return USE_SIGTRAP_SIGINFO;
}
5295
5296/* Implement the to_stopped_by_hw_breakpoint target_ops
5297 method. */
5298
5299static int
5300linux_stopped_by_hw_breakpoint (void)
5301{
5302 struct lwp_info *lwp = get_thread_lwp (current_thread);
5303
5304 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5305}
5306
/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
   method.  */

static int
linux_supports_stopped_by_hw_breakpoint (void)
{
  /* Same compile-time capability flag as the SW-breakpoint variant.  */
  return USE_SIGTRAP_SIGINFO;
}
5315
45614f15
YQ
/* Implement the supports_conditional_breakpoints target_ops
   method.  Returns nonzero exactly when hardware single-step is
   available.  */

static int
linux_supports_conditional_breakpoints (void)
{
  /* GDBserver needs to step over the breakpoint if the condition is
     false.  GDBserver software single step is too simple, so disable
     conditional breakpoints if the target doesn't have hardware single
     step.  */
  return can_hardware_single_step ();
}
5328
e013ee27
OF
5329static int
5330linux_stopped_by_watchpoint (void)
5331{
0bfdf32f 5332 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c 5333
15c66dd6 5334 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
e013ee27
OF
5335}
5336
5337static CORE_ADDR
5338linux_stopped_data_address (void)
5339{
0bfdf32f 5340 struct lwp_info *lwp = get_thread_lwp (current_thread);
c3adc08c
PA
5341
5342 return lwp->stopped_data_address;
e013ee27
OF
5343}
5344
db0dfaa0
LM
5345#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5346 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5347 && defined(PT_TEXT_END_ADDR)
5348
5349/* This is only used for targets that define PT_TEXT_ADDR,
5350 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5351 the target has different ways of acquiring this information, like
5352 loadmaps. */
52fb6437
NS
5353
/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  Reads the runtime text/data load addresses from
   the process's user area and returns 1 on success (filling *TEXT_P
   and *DATA_P), 0 if any of the ptrace reads failed.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  errno = 0;

  /* Fetch the addresses the kernel records in the user area; any
     failure leaves errno set.  */
  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
5390#endif
5391
07e059b5
VP
/* Handle qXfer:osdata requests by delegating to the shared
   linux-osdata code.  WRITEBUF is accepted for interface
   compatibility but not used — osdata is read-only.  */

static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5399
d0722149
DE
5400/* Convert a native/host siginfo object, into/from the siginfo in the
5401 layout of the inferiors' architecture. */
5402
5403static void
a5362b9a 5404siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
d0722149
DE
5405{
5406 int done = 0;
5407
5408 if (the_low_target.siginfo_fixup != NULL)
5409 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5410
5411 /* If there was no callback, or the callback didn't do anything,
5412 then just do a straight memcpy. */
5413 if (!done)
5414 {
5415 if (direction == 1)
a5362b9a 5416 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
d0722149 5417 else
a5362b9a 5418 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
d0722149
DE
5419 }
5420}
5421
4aa995e1
PA
/* Read (READBUF non-NULL) or write (WRITEBUF non-NULL) part of the
   current LWP's siginfo, at OFFSET for LEN bytes.  Returns the number
   of bytes transferred, or -1 on error or when no thread is
   selected.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  /* Writes are read-modify-write: fetch the current siginfo first so
     a partial write leaves the remaining bytes intact.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
5470
bd99dc85
PA
5471/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5472 so we notice when children change state; as the handler for the
5473 sigsuspend in my_waitpid. */
5474
5475static void
5476sigchld_handler (int signo)
5477{
5478 int old_errno = errno;
5479
5480 if (debug_threads)
e581f2b4
PA
5481 {
5482 do
5483 {
5484 /* fprintf is not async-signal-safe, so call write
5485 directly. */
5486 if (write (2, "sigchld_handler\n",
5487 sizeof ("sigchld_handler\n") - 1) < 0)
5488 break; /* just ignore */
5489 } while (0);
5490 }
bd99dc85
PA
5491
5492 if (target_is_async_p ())
5493 async_file_mark (); /* trigger a linux_wait */
5494
5495 errno = old_errno;
5496}
5497
/* The Linux target always supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
5503
/* Enable (ENABLE != 0) or disable async mode.  When enabling, create
   the self-pipe that wakes the event loop and register it; when
   disabling, unregister and close it.  SIGCHLD is blocked across the
   transition so the handler cannot race with the pipe setup/teardown.
   Returns the previous async state.  */

static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      /* Leave both ends marked invalid and report the old
		 state — async mode was not entered.  */
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      sigprocmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
5558
/* Switch non-stop mode on or off per NONSTOP.  Returns 0 on success,
   -1 if the async state did not end up where requested.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  return (target_is_async_p () != (nonstop != 0)) ? -1 : 0;
}
5570
cf8fd78b
PA
/* The Linux target always supports debugging multiple processes.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
5576
89245bc0
DB
/* Check if fork events are supported.  Delegates to the runtime
   PTRACE_O_TRACEFORK capability probe.  */

static int
linux_supports_fork_events (void)
{
  return linux_supports_tracefork ();
}
5584
/* Check if vfork events are supported.  Uses the same tracefork
   capability probe as fork events.  */

static int
linux_supports_vfork_events (void)
{
  return linux_supports_tracefork ();
}
5592
de0d863e
DB
/* Callback for 'find_inferior'.  Set the (possibly changed) ptrace
   options for the specified lwp.  */

static int
reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
				   void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (!lwp->stopped)
    {
      /* Stop the lwp so we can modify its ptrace options; the flag
	 defers the actual option change to when it reports the
	 stop.  */
      lwp->must_set_ptrace_flags = 1;
      linux_stop_lwp (lwp);
    }
  else
    {
      /* Already stopped; go ahead and set the ptrace options.  */
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (thread), options);
      lwp->must_set_ptrace_flags = 0;
    }

  /* Return 0 so find_inferior visits every thread.  */
  return 0;
}
5621
/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

static void
linux_handle_new_gdb_connection (void)
{
  pid_t pid;

  /* Request that all the lwps reset their ptrace options.  */
  /* NOTE(review): PID is passed as the callback argument but the
     callback never reads it and it is never initialized here —
     presumably a leftover; confirm before cleaning up.  */
  find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
}
5634
03583c20
UW
/* Return 1 if address space randomization can be disabled for new
   inferiors, i.e. when personality() support was detected at build
   time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
efcbbd14 5644
d1feda86
YQ
/* Target hook: the in-process agent is always usable on Linux.  */
static int
linux_supports_agent (void)
{
  return 1;
}
5650
c2d6af84
PA
5651static int
5652linux_supports_range_stepping (void)
5653{
5654 if (*the_low_target.supports_range_stepping == NULL)
5655 return 0;
5656
5657 return (*the_low_target.supports_range_stepping) ();
5658}
5659
efcbbd14
UW
5660/* Enumerate spufs IDs for process PID. */
5661static int
5662spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5663{
5664 int pos = 0;
5665 int written = 0;
5666 char path[128];
5667 DIR *dir;
5668 struct dirent *entry;
5669
5670 sprintf (path, "/proc/%ld/fd", pid);
5671 dir = opendir (path);
5672 if (!dir)
5673 return -1;
5674
5675 rewinddir (dir);
5676 while ((entry = readdir (dir)) != NULL)
5677 {
5678 struct stat st;
5679 struct statfs stfs;
5680 int fd;
5681
5682 fd = atoi (entry->d_name);
5683 if (!fd)
5684 continue;
5685
5686 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5687 if (stat (path, &st) != 0)
5688 continue;
5689 if (!S_ISDIR (st.st_mode))
5690 continue;
5691
5692 if (statfs (path, &stfs) != 0)
5693 continue;
5694 if (stfs.f_type != SPUFS_MAGIC)
5695 continue;
5696
5697 if (pos >= offset && pos + 4 <= offset + len)
5698 {
5699 *(unsigned int *)(buf + pos - offset) = fd;
5700 written += 4;
5701 }
5702 pos += 4;
5703 }
5704
5705 closedir (dir);
5706 return written;
5707}
5708
5709/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5710 object type, using the /proc file system. */
5711static int
5712linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5713 unsigned const char *writebuf,
5714 CORE_ADDR offset, int len)
5715{
0bfdf32f 5716 long pid = lwpid_of (current_thread);
efcbbd14
UW
5717 char buf[128];
5718 int fd = 0;
5719 int ret = 0;
5720
5721 if (!writebuf && !readbuf)
5722 return -1;
5723
5724 if (!*annex)
5725 {
5726 if (!readbuf)
5727 return -1;
5728 else
5729 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5730 }
5731
5732 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5733 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5734 if (fd <= 0)
5735 return -1;
5736
5737 if (offset != 0
5738 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5739 {
5740 close (fd);
5741 return 0;
5742 }
5743
5744 if (writebuf)
5745 ret = write (fd, writebuf, (size_t) len);
5746 else
5747 ret = read (fd, readbuf, (size_t) len);
5748
5749 close (fd);
5750 return ret;
5751}
5752
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
/* One segment of an FDPIC/DSBT load map.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PT_GETDSBT
# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
# define LINUX_LOADMAP PTRACE_GETFDPIC
# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
# endif

/* Fetch the load map of either the executable ("exec") or the
   interpreter ("interp"), as selected by ANNEX, via ptrace, and copy
   up to LEN bytes of it starting at OFFSET into MYADDR.  Returns the
   number of bytes copied, or -1 on any error.  */

static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  /* The kernel fills in DATA with a pointer into the inferior's
     address space image of the load map.  */
  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
78d85199 5830
1570b33e
L
5831static void
5832linux_process_qsupported (const char *query)
5833{
5834 if (the_low_target.process_qsupported != NULL)
5835 the_low_target.process_qsupported (query);
5836}
5837
219f2f23
PA
5838static int
5839linux_supports_tracepoints (void)
5840{
5841 if (*the_low_target.supports_tracepoints == NULL)
5842 return 0;
5843
5844 return (*the_low_target.supports_tracepoints) ();
5845}
5846
5847static CORE_ADDR
5848linux_read_pc (struct regcache *regcache)
5849{
5850 if (the_low_target.get_pc == NULL)
5851 return 0;
5852
5853 return (*the_low_target.get_pc) (regcache);
5854}
5855
5856static void
5857linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5858{
5859 gdb_assert (the_low_target.set_pc != NULL);
5860
5861 (*the_low_target.set_pc) (regcache, pc);
5862}
5863
8336d594
PA
/* Target hook: report whether THREAD's lwp is currently stopped.  */
static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
5869
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is forwarded unchanged to stop_all_lwps.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}
5877
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is forwarded unchanged to unstop_all_lwps.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
5886
90d74c30
PA
/* Target hook called before memory accesses.  Returns 0 on success.  */
static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}
5896
/* Target hook called after memory accesses; undoes the pause done by
   linux_prepare_to_access_memory.  */
static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}
5905
fa593d66
PA
/* Target hook for installing a fast tracepoint jump pad.  This is a
   pure forwarder to the architecture's hook; all arguments and the
   return value pass through unchanged.  NOTE(review): the hook is
   called unconditionally — presumably callers only reach this after
   linux_supports_tracepoints returned nonzero; confirm at call
   sites.  */
static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
5927
6a271cae
PA
5928static struct emit_ops *
5929linux_emit_ops (void)
5930{
5931 if (the_low_target.emit_ops != NULL)
5932 return (*the_low_target.emit_ops) ();
5933 else
5934 return NULL;
5935}
5936
405f8e94
SS
/* Target hook: minimum instruction length usable for a fast
   tracepoint.  NOTE(review): unlike the neighboring hooks this one
   does not NULL-check the architecture pointer — presumably it is
   only reached when fast tracepoints are supported; confirm.  */
static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}
5942
2268b414
JK
/* Extract &phdr and num_phdr in the inferior.  Reads AT_PHDR and
   AT_PHNUM from /proc/PID/auxv, storing them in *PHDR_MEMADDR and
   *NUM_PHDR.  IS_ELF64 selects the auxv entry width.  Return 0 on
   success, 1 if the auxv file cannot be opened, 2 if either value is
   missing.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  /* Stop early once both values have been seen.  */
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
6008
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not
   present.  The program headers are located through the auxv, read
   from inferior memory, and the PIE relocation is recovered from
   PT_PHDR before the PT_DYNAMIC address is returned.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
	 any real world executables, including PIE executables, have always
	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
	 or present DT_DEBUG anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  /* Second pass: locate PT_DYNAMIC and apply the relocation.  */
  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
6082
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  /* Walk the dynamic section entry by entry until DT_NULL.  */
  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  /* DT_MIPS_RLD_MAP's value is a pointer to the map address;
	     one more indirection is needed.  */
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
6162
/* Read one pointer of width PTR_SIZE bytes from MEMADDR in the
   inferior into *PTR.  Returns the linux_read_memory result (0 on
   success).  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same of the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
6193
/* Field offsets within the inferior's "struct r_debug" and
   "struct link_map", for either the 32-bit or 64-bit SVR4 layout.  */
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
6217
/* Construct qXfer:libraries-svr4:read reply.  ANNEX optionally
   carries "start=ADDR;" and "prev=ADDR;" to resume a walk of the link
   map; otherwise the walk starts from the inferior's r_debug.r_map.
   The XML document is built in full and the OFFSET/LEN window copied
   into READBUF.  Writing is not supported (returns -2); returns -1 on
   error, otherwise the number of bytes copied.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset. */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset. */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  /* Pick 32- vs 64-bit layout from the inferior's executable.  */
  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  /* Parse "name=hexaddr;" pairs out of the annex; unrecognized pairs
     are skipped.  */
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  /* Walk the link map, emitting one <library> element per entry.  */
  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			    "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  /* Copy out only the [OFFSET, OFFSET+LEN) window of the document.  */
  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
6430
9accd112
MM
6431#ifdef HAVE_LINUX_BTRACE
6432
969c39fb 6433/* See to_enable_btrace target method. */
9accd112
MM
6434
6435static struct btrace_target_info *
f4abbc16 6436linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
9accd112
MM
6437{
6438 struct btrace_target_info *tinfo;
6439
f4abbc16 6440 tinfo = linux_enable_btrace (ptid, conf);
3aee8918 6441
d68e53f4 6442 if (tinfo != NULL && tinfo->ptr_bits == 0)
3aee8918
PA
6443 {
6444 struct thread_info *thread = find_thread_ptid (ptid);
6445 struct regcache *regcache = get_thread_regcache (thread, 0);
6446
6447 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6448 }
9accd112
MM
6449
6450 return tinfo;
6451}
6452
969c39fb 6453/* See to_disable_btrace target method. */
9accd112 6454
969c39fb
MM
6455static int
6456linux_low_disable_btrace (struct btrace_target_info *tinfo)
6457{
6458 enum btrace_error err;
6459
6460 err = linux_disable_btrace (tinfo);
6461 return (err == BTRACE_ERR_NONE ? 0 : -1);
6462}
6463
/* See to_read_btrace target method.  Reads branch trace data of kind
   TYPE for TINFO and renders it as XML (or an "E." error string) into
   BUFFER.  Returns 0 on success, -1 on failure.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace_data_init (&btrace);

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      btrace_data_fini (&btrace);
      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      /* NOTE(review): this emits an error string into BUFFER but then
	 falls out of the switch and returns 0 (success), unlike the
	 default case below — confirm this asymmetry is intended.  */
      buffer_grow_str0 (buffer, "E.No Trace.");
      break;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (i = 0;
	   VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
	   i++)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block->begin), paddress (block->end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unknown Trace Format.");

      btrace_data_fini (&btrace);
      return -1;
    }

  btrace_data_fini (&btrace);
  return 0;
}
f4abbc16
MM
6518
6519/* See to_btrace_conf target method. */
6520
6521static int
6522linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6523 struct buffer *buffer)
6524{
6525 const struct btrace_config *conf;
6526
6527 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6528 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6529
6530 conf = linux_btrace_conf (tinfo);
6531 if (conf != NULL)
6532 {
6533 switch (conf->format)
6534 {
6535 case BTRACE_FORMAT_NONE:
6536 break;
6537
6538 case BTRACE_FORMAT_BTS:
d33501a5
MM
6539 buffer_xml_printf (buffer, "<bts");
6540 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6541 buffer_xml_printf (buffer, " />\n");
f4abbc16
MM
6542 break;
6543 }
6544 }
6545
6546 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6547 return 0;
6548}
9accd112
MM
6549#endif /* HAVE_LINUX_BTRACE */
6550
7b669087
GB
/* See nat/linux-nat.h.  Returns the ptid of the current thread's
   lwp.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}
6558
ce3a066d
DJ
/* The Linux implementation of the target vector.  Entries are
   positional; NULL marks a hook this target does not implement.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_conditional_breakpoints,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,  /* hook not implemented by this target */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
};
6652
0d62e5e8
DJ
/* Set up the signal dispositions gdbserver needs on Linux.  */
static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
6662
3aee8918
PA
#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO's array — which is terminated by an
   entry with a negative size — and record the count in
   INFO->num_regsets.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  int count = 0;

  while (info->regsets[count].size >= 0)
    count++;

  info->num_regsets = count;
}
#endif
6673
da6d8c04
DJ
/* Module initialization: install the Linux target vector, breakpoint
   data, signal handling and architecture-specific state.  */
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  /* SA_RESTART so interrupted syscalls are resumed transparently when
     SIGCHLD arrives.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}
This page took 1.841111 seconds and 4 git commands to generate.