1/* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19#include "server.h"
20#include "linux-low.h"
21#include "nat/linux-osdata.h"
22#include "agent.h"
23#include "tdesc.h"
24#include "rsp-low.h"
25
26#include "nat/linux-nat.h"
27#include "nat/linux-waitpid.h"
28#include "gdb_wait.h"
29#include "nat/gdb_ptrace.h"
30#include "nat/linux-ptrace.h"
31#include "nat/linux-procfs.h"
32#include "nat/linux-personality.h"
33#include <signal.h>
34#include <sys/ioctl.h>
35#include <fcntl.h>
36#include <unistd.h>
37#include <sys/syscall.h>
38#include <sched.h>
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
43#include <sys/stat.h>
44#include <sys/vfs.h>
45#include <sys/uio.h>
46#include "filestuff.h"
47#include "tracepoint.h"
48#include "hostio.h"
49#ifndef ELFMAG0
50/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54#include <elf.h>
55#endif
56#include "nat/linux-namespaces.h"
57
58#ifndef SPUFS_MAGIC
59#define SPUFS_MAGIC 0x23c9b64e
60#endif
61
62#ifdef HAVE_PERSONALITY
63# include <sys/personality.h>
64# if !HAVE_DECL_ADDR_NO_RANDOMIZE
65# define ADDR_NO_RANDOMIZE 0x0040000
66# endif
67#endif
68
69#ifndef O_LARGEFILE
70#define O_LARGEFILE 0
71#endif
72
73/* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76#if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels. */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR 51*4
84/* BFIN already defines these since at least 2.6.32 kernels. */
85#elif defined(BFIN)
86#define PT_TEXT_ADDR 220
87#define PT_TEXT_END_ADDR 224
88#define PT_DATA_ADDR 228
89/* These are still undefined in 3.10 kernels. */
90#elif defined(__TMS320C6X__)
91#define PT_TEXT_ADDR (0x10000*4)
92#define PT_DATA_ADDR (0x10004*4)
93#define PT_TEXT_END_ADDR (0x10008*4)
94#endif
95#endif
96
97#ifdef HAVE_LINUX_BTRACE
98# include "nat/linux-btrace.h"
99# include "btrace-common.h"
100#endif
101
102#ifndef HAVE_ELF32_AUXV_T
103/* Copied from glibc's elf.h. */
104typedef struct
105{
106 uint32_t a_type; /* Entry type */
107 union
108 {
109 uint32_t a_val; /* Integer value */
110 /* We used to have pointer elements added here. We cannot do that,
111 though, since it does not work when using 32-bit definitions
112 on 64-bit platforms and vice versa. */
113 } a_un;
114} Elf32_auxv_t;
115#endif
116
117#ifndef HAVE_ELF64_AUXV_T
118/* Copied from glibc's elf.h. */
119typedef struct
120{
121 uint64_t a_type; /* Entry type */
122 union
123 {
124 uint64_t a_val; /* Integer value */
125 /* We used to have pointer elements added here. We cannot do that,
126 though, since it does not work when using 32-bit definitions
127 on 64-bit platforms and vice versa. */
128 } a_un;
129} Elf64_auxv_t;
130#endif
131
132/* Does the current host support PTRACE_GETREGSET? */
133int have_ptrace_getregset = -1;
134
135/* LWP accessors. */
136
137/* See nat/linux-nat.h. */
138
139ptid_t
140ptid_of_lwp (struct lwp_info *lwp)
141{
142 return ptid_of (get_lwp_thread (lwp));
143}
144
145/* See nat/linux-nat.h. */
146
147void
148lwp_set_arch_private_info (struct lwp_info *lwp,
149 struct arch_lwp_info *info)
150{
151 lwp->arch_private = info;
152}
153
154/* See nat/linux-nat.h. */
155
156struct arch_lwp_info *
157lwp_arch_private_info (struct lwp_info *lwp)
158{
159 return lwp->arch_private;
160}
161
162/* See nat/linux-nat.h. */
163
164int
165lwp_is_stopped (struct lwp_info *lwp)
166{
167 return lwp->stopped;
168}
169
170/* See nat/linux-nat.h. */
171
172enum target_stop_reason
173lwp_stop_reason (struct lwp_info *lwp)
174{
175 return lwp->stop_reason;
176}
177
178/* A list of all unknown processes which receive stop signals. Some
179 other process will presumably claim each of these as forked
180 children momentarily. */
181
182struct simple_pid_list
183{
184 /* The process ID. */
185 int pid;
186
187 /* The status as reported by waitpid. */
188 int status;
189
190 /* Next in chain. */
191 struct simple_pid_list *next;
192};
193struct simple_pid_list *stopped_pids;
194
195/* Trivial list manipulation functions to keep track of a list of new
196 stopped processes. */
197
198static void
199add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
200{
201 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
202
203 new_pid->pid = pid;
204 new_pid->status = status;
205 new_pid->next = *listp;
206 *listp = new_pid;
207}
208
209static int
210pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
211{
212 struct simple_pid_list **p;
213
214 for (p = listp; *p != NULL; p = &(*p)->next)
215 if ((*p)->pid == pid)
216 {
217 struct simple_pid_list *next = (*p)->next;
218
219 *statusp = (*p)->status;
220 xfree (*p);
221 *p = next;
222 return 1;
223 }
224 return 0;
225}
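
/* Illustrative pairing of the two helpers above (a sketch, not part of
   the original flow; the names mirror handle_extended_wait below).
   When a clone/fork event is reported, the tracer first checks whether
   the new LWP's initial stop was already collected:

     int status;
     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       my_waitpid (new_pid, &status, __WALL);

   The matching add_to_pid_list (&stopped_pids, pid, status) call is
   made wherever a stop for a not-yet-known PID is collected first.  */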
226
227enum stopping_threads_kind
228 {
229 /* Not stopping threads presently. */
230 NOT_STOPPING_THREADS,
231
232 /* Stopping threads. */
233 STOPPING_THREADS,
234
235 /* Stopping and suspending threads. */
236 STOPPING_AND_SUSPENDING_THREADS
237 };
238
239/* This is set while stop_all_lwps is in effect. */
240enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
241
242/* FIXME make into a target method? */
243int using_threads = 1;
244
245/* True if we're presently stabilizing threads (moving them out of
246 jump pads). */
247static int stabilizing_threads;
248
249static void linux_resume_one_lwp (struct lwp_info *lwp,
250 int step, int signal, siginfo_t *info);
251static void linux_resume (struct thread_resume *resume_info, size_t n);
252static void stop_all_lwps (int suspend, struct lwp_info *except);
253static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
254static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
255 int *wstat, int options);
256static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
257static struct lwp_info *add_lwp (ptid_t ptid);
258static void linux_mourn (struct process_info *process);
259static int linux_stopped_by_watchpoint (void);
260static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
261static int lwp_is_marked_dead (struct lwp_info *lwp);
262static void proceed_all_lwps (void);
263static int finish_step_over (struct lwp_info *lwp);
264static int kill_lwp (unsigned long lwpid, int signo);
265static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
266static void complete_ongoing_step_over (void);
267static int linux_low_ptrace_options (int attached);
268
269/* When the event-loop is doing a step-over, this points at the thread
270 being stepped. */
271ptid_t step_over_bkpt;
272
273/* True if the low target can hardware single-step. */
274
275static int
276can_hardware_single_step (void)
277{
278 if (the_low_target.supports_hardware_single_step != NULL)
279 return the_low_target.supports_hardware_single_step ();
280 else
281 return 0;
282}
283
284/* True if the low target can software single-step. Such targets
285 implement the BREAKPOINT_REINSERT_ADDR callback. */
286
287static int
288can_software_single_step (void)
289{
290 return (the_low_target.breakpoint_reinsert_addr != NULL);
291}
292
293/* True if the low target supports memory breakpoints. If so, we'll
294 have a GET_PC implementation. */
295
296static int
297supports_breakpoints (void)
298{
299 return (the_low_target.get_pc != NULL);
300}
301
302/* Returns true if this target can support fast tracepoints. This
303 does not mean that the in-process agent has been loaded in the
304 inferior. */
305
306static int
307supports_fast_tracepoints (void)
308{
309 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
310}
311
312/* True if LWP is stopped in its stepping range. */
313
314static int
315lwp_in_step_range (struct lwp_info *lwp)
316{
317 CORE_ADDR pc = lwp->stop_pc;
318
319 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
320}
321
322struct pending_signals
323{
324 int signal;
325 siginfo_t info;
326 struct pending_signals *prev;
327};
328
329/* The read/write ends of the pipe registered as waitable file in the
330 event loop. */
331static int linux_event_pipe[2] = { -1, -1 };
332
333/* True if we're currently in async mode. */
334#define target_is_async_p() (linux_event_pipe[0] != -1)
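
/* The pipe implements the usual self-pipe trick: enabling async mode
   opens it and registers the read end with the event loop, and marking
   an event writes to linux_event_pipe[1] (see async_file_mark,
   declared below) so the event loop wakes up. A -1 read end therefore
   doubles as the "async disabled" sentinel tested above. */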
335
336static void send_sigstop (struct lwp_info *lwp);
337static void wait_for_sigstop (void);
338
339/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit, and -1 if it is not ELF at all; set *MACHINE to e_machine, or to EM_NONE if not ELF. */
340
341static int
342elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
343{
344 if (header->e_ident[EI_MAG0] == ELFMAG0
345 && header->e_ident[EI_MAG1] == ELFMAG1
346 && header->e_ident[EI_MAG2] == ELFMAG2
347 && header->e_ident[EI_MAG3] == ELFMAG3)
348 {
349 *machine = header->e_machine;
350 return header->e_ident[EI_CLASS] == ELFCLASS64;
351
352 }
353 *machine = EM_NONE;
354 return -1;
355}
356
357/* Return 1 if FILE is a 64-bit ELF file,
358 zero if it is a 32-bit ELF file or its header cannot be read,
359 and -1 if it doesn't exist, is inaccessible, or is not ELF at all. */
360
361static int
362elf_64_file_p (const char *file, unsigned int *machine)
363{
364 Elf64_Ehdr header;
365 int fd;
366
367 fd = open (file, O_RDONLY);
368 if (fd < 0)
369 return -1;
370
371 if (read (fd, &header, sizeof (header)) != sizeof (header))
372 {
373 close (fd);
374 return 0;
375 }
376 close (fd);
377
378 return elf_64_header_p (&header, machine);
379}
380
381/* Accept an integer PID; return true if the executable that PID
382 is running is a 64-bit ELF file. */
383
384int
385linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
386{
387 char file[PATH_MAX];
388
389 sprintf (file, "/proc/%d/exe", pid);
390 return elf_64_file_p (file, machine);
391}
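
/* A minimal usage sketch for the tri-state result (illustrative only;
   the PID is hypothetical):

     unsigned int machine;
     int res = linux_pid_exe_is_elf_64_file (1234, &machine);
     if (res < 0)
       ...   file unreadable, nonexistent, or not ELF at all
     else if (res > 0)
       ...   64-bit inferior; MACHINE holds the ELF e_machine value
     else
       ...   32-bit inferior, or the header could not be fully read  */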
392
393static void
394delete_lwp (struct lwp_info *lwp)
395{
396 struct thread_info *thr = get_lwp_thread (lwp);
397
398 if (debug_threads)
399 debug_printf ("deleting %ld\n", lwpid_of (thr));
400
401 remove_thread (thr);
402 free (lwp->arch_private);
403 free (lwp);
404}
405
406/* Add a process to the common process list, and set its private
407 data. */
408
409static struct process_info *
410linux_add_process (int pid, int attached)
411{
412 struct process_info *proc;
413
414 proc = add_process (pid, attached);
415 proc->priv = XCNEW (struct process_info_private);
416
417 if (the_low_target.new_process != NULL)
418 proc->priv->arch_private = the_low_target.new_process ();
419
420 return proc;
421}
422
423static CORE_ADDR get_pc (struct lwp_info *lwp);
424
425/* Call the target arch_setup function on the current thread. */
426
427static void
428linux_arch_setup (void)
429{
430 the_low_target.arch_setup ();
431}
432
433/* Call the target arch_setup function on THREAD. */
434
435static void
436linux_arch_setup_thread (struct thread_info *thread)
437{
438 struct thread_info *saved_thread;
439
440 saved_thread = current_thread;
441 current_thread = thread;
442
443 linux_arch_setup ();
444
445 current_thread = saved_thread;
446}
447
448/* Handle a GNU/Linux extended wait response. If we see a clone
449 event, we add the new LWP to our list and return 1 so the trap is
450 not reported to higher layers; fork and vfork events are reported.
451 If we see an exec event, we will modify ORIG_EVENT_LWP to point
452 to a new LWP representing the new program. */
453
454static int
455handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
456{
457 struct lwp_info *event_lwp = *orig_event_lwp;
458 int event = linux_ptrace_get_extended_event (wstat);
459 struct thread_info *event_thr = get_lwp_thread (event_lwp);
460 struct lwp_info *new_lwp;
461
462 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
463
464 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
465 || (event == PTRACE_EVENT_CLONE))
466 {
467 ptid_t ptid;
468 unsigned long new_pid;
469 int ret, status;
470
471 /* Get the pid of the new lwp. */
472 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
473 &new_pid);
474
475 /* If we haven't already seen the new PID stop, wait for it now. */
476 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
477 {
478 /* The new child has a pending SIGSTOP. We can't affect it until it
479 hits the SIGSTOP, but we're already attached. */
480
481 ret = my_waitpid (new_pid, &status, __WALL);
482
483 if (ret == -1)
484 perror_with_name ("waiting for new child");
485 else if (ret != new_pid)
486 warning ("wait returned unexpected PID %d", ret);
487 else if (!WIFSTOPPED (status))
488 warning ("wait returned unexpected status 0x%x", status);
489 }
490
491 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
492 {
493 struct process_info *parent_proc;
494 struct process_info *child_proc;
495 struct lwp_info *child_lwp;
496 struct thread_info *child_thr;
497 struct target_desc *tdesc;
498
499 ptid = ptid_build (new_pid, new_pid, 0);
500
501 if (debug_threads)
502 {
503 debug_printf ("HEW: Got fork event from LWP %ld, "
504 "new child is %d\n",
505 ptid_get_lwp (ptid_of (event_thr)),
506 ptid_get_pid (ptid));
507 }
508
509 /* Add the new process to the tables and clone the breakpoint
510 lists of the parent. We need to do this even if the new process
511 will be detached, since we will need the process object and the
512 breakpoints to remove any breakpoints from memory when we
513 detach, and the client side will access registers. */
514 child_proc = linux_add_process (new_pid, 0);
515 gdb_assert (child_proc != NULL);
516 child_lwp = add_lwp (ptid);
517 gdb_assert (child_lwp != NULL);
518 child_lwp->stopped = 1;
519 child_lwp->must_set_ptrace_flags = 1;
520 child_lwp->status_pending_p = 0;
521 child_thr = get_lwp_thread (child_lwp);
522 child_thr->last_resume_kind = resume_stop;
523 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
524
525 /* If we're suspending all threads, leave this one suspended
526 too. */
527 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
528 {
529 if (debug_threads)
530 debug_printf ("HEW: leaving child suspended\n");
531 child_lwp->suspended = 1;
532 }
533
534 parent_proc = get_thread_process (event_thr);
535 child_proc->attached = parent_proc->attached;
536 clone_all_breakpoints (&child_proc->breakpoints,
537 &child_proc->raw_breakpoints,
538 parent_proc->breakpoints);
539
540 tdesc = XNEW (struct target_desc);
541 copy_target_description (tdesc, parent_proc->tdesc);
542 child_proc->tdesc = tdesc;
543
544 /* Clone arch-specific process data. */
545 if (the_low_target.new_fork != NULL)
546 the_low_target.new_fork (parent_proc, child_proc);
547
548 /* Save fork info in the parent thread. */
549 if (event == PTRACE_EVENT_FORK)
550 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
551 else if (event == PTRACE_EVENT_VFORK)
552 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
553
554 event_lwp->waitstatus.value.related_pid = ptid;
555
556 /* The status_pending field contains bits denoting the
557 extended event, so when the pending event is handled,
558 the handler will look at lwp->waitstatus. */
559 event_lwp->status_pending_p = 1;
560 event_lwp->status_pending = wstat;
561
562 /* Report the event. */
563 return 0;
564 }
565
566 if (debug_threads)
567 debug_printf ("HEW: Got clone event "
568 "from LWP %ld, new child is LWP %ld\n",
569 lwpid_of (event_thr), new_pid);
570
571 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
572 new_lwp = add_lwp (ptid);
573
574 /* Either we're going to immediately resume the new thread
575 or leave it stopped. linux_resume_one_lwp is a nop if it
576 thinks the thread is currently running, so set this first
577 before calling linux_resume_one_lwp. */
578 new_lwp->stopped = 1;
579
580 /* If we're suspending all threads, leave this one suspended
581 too. */
582 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
583 new_lwp->suspended = 1;
584
585 /* Normally we will get the pending SIGSTOP. But in some cases
586 we might get another signal delivered to the group first.
587 If we do get another signal, be sure not to lose it. */
588 if (WSTOPSIG (status) != SIGSTOP)
589 {
590 new_lwp->stop_expected = 1;
591 new_lwp->status_pending_p = 1;
592 new_lwp->status_pending = status;
593 }
594 else if (report_thread_events)
595 {
596 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
597 new_lwp->status_pending_p = 1;
598 new_lwp->status_pending = status;
599 }
600
601 /* Don't report the event. */
602 return 1;
603 }
604 else if (event == PTRACE_EVENT_VFORK_DONE)
605 {
606 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
607
608 /* Report the event. */
609 return 0;
610 }
611 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
612 {
613 struct process_info *proc;
614 ptid_t event_ptid;
615 pid_t event_pid;
616
617 if (debug_threads)
618 {
619 debug_printf ("HEW: Got exec event from LWP %ld\n",
620 lwpid_of (event_thr));
621 }
622
623 /* Get the event ptid. */
624 event_ptid = ptid_of (event_thr);
625 event_pid = ptid_get_pid (event_ptid);
626
627 /* Delete the execing process and all its threads. */
628 proc = get_thread_process (event_thr);
629 linux_mourn (proc);
630 current_thread = NULL;
631
632 /* Create a new process/lwp/thread. */
633 proc = linux_add_process (event_pid, 0);
634 event_lwp = add_lwp (event_ptid);
635 event_thr = get_lwp_thread (event_lwp);
636 gdb_assert (current_thread == event_thr);
637 linux_arch_setup_thread (event_thr);
638
639 /* Set the event status. */
640 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
641 event_lwp->waitstatus.value.execd_pathname
642 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
643
644 /* Mark the exec status as pending. */
645 event_lwp->stopped = 1;
646 event_lwp->status_pending_p = 1;
647 event_lwp->status_pending = wstat;
648 event_thr->last_resume_kind = resume_continue;
649 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
650
651 /* Report the event. */
652 *orig_event_lwp = event_lwp;
653 return 0;
654 }
655
656 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
657}
658
659/* Return the PC as read from the regcache of LWP, without any
660 adjustment. */
661
662static CORE_ADDR
663get_pc (struct lwp_info *lwp)
664{
665 struct thread_info *saved_thread;
666 struct regcache *regcache;
667 CORE_ADDR pc;
668
669 if (the_low_target.get_pc == NULL)
670 return 0;
671
672 saved_thread = current_thread;
673 current_thread = get_lwp_thread (lwp);
674
675 regcache = get_thread_regcache (current_thread, 1);
676 pc = (*the_low_target.get_pc) (regcache);
677
678 if (debug_threads)
679 debug_printf ("pc is 0x%lx\n", (long) pc);
680
681 current_thread = saved_thread;
682 return pc;
683}
684
685/* This function should only be called if LWP got a SIGTRAP.
686 The SIGTRAP could mean several things.
687
688 On i386, where decr_pc_after_break is non-zero:
689
690 If we were single-stepping this process using PTRACE_SINGLESTEP, we
691 will get only the one SIGTRAP. The value of $eip will be the next
692 instruction. If the instruction we stepped over was a breakpoint,
693 we need to decrement the PC.
694
695 If we continue the process using PTRACE_CONT, we will get a
696 SIGTRAP when we hit a breakpoint. The value of $eip will be
697 the instruction after the breakpoint (i.e. needs to be
698 decremented). If we report the SIGTRAP to GDB, we must also
699 report the undecremented PC. If the breakpoint is removed, we
700 must resume at the decremented PC.
701
702 On a non-decr_pc_after_break machine with hardware or kernel
703 single-step:
704
705 If we either single-step a breakpoint instruction, or continue and
706 hit a breakpoint instruction, our PC will point at the breakpoint
707 instruction. */
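
/* Worked example of the decr_pc_after_break case (illustrative): on
   i386 the breakpoint instruction is the one-byte int3, and
   decr_pc_after_break is 1. With a breakpoint planted at 0x1000,
   hitting it reports $eip == 0x1001, so the code below rewinds the
   PC to sw_breakpoint_pc = 0x1001 - 1 = 0x1000 before reporting the
   stop. */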
708
709static int
710check_stopped_by_breakpoint (struct lwp_info *lwp)
711{
712 CORE_ADDR pc;
713 CORE_ADDR sw_breakpoint_pc;
714 struct thread_info *saved_thread;
715#if USE_SIGTRAP_SIGINFO
716 siginfo_t siginfo;
717#endif
718
719 if (the_low_target.get_pc == NULL)
720 return 0;
721
722 pc = get_pc (lwp);
723 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
724
725 /* breakpoint_at reads from the current thread. */
726 saved_thread = current_thread;
727 current_thread = get_lwp_thread (lwp);
728
729#if USE_SIGTRAP_SIGINFO
730 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
731 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
732 {
733 if (siginfo.si_signo == SIGTRAP)
734 {
735 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
736 {
737 if (debug_threads)
738 {
739 struct thread_info *thr = get_lwp_thread (lwp);
740
741 debug_printf ("CSBB: %s stopped by software breakpoint\n",
742 target_pid_to_str (ptid_of (thr)));
743 }
744
745 /* Back up the PC if necessary. */
746 if (pc != sw_breakpoint_pc)
747 {
748 struct regcache *regcache
749 = get_thread_regcache (current_thread, 1);
750 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
751 }
752
753 lwp->stop_pc = sw_breakpoint_pc;
754 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
755 current_thread = saved_thread;
756 return 1;
757 }
758 else if (siginfo.si_code == TRAP_HWBKPT)
759 {
760 if (debug_threads)
761 {
762 struct thread_info *thr = get_lwp_thread (lwp);
763
764 debug_printf ("CSBB: %s stopped by hardware "
765 "breakpoint/watchpoint\n",
766 target_pid_to_str (ptid_of (thr)));
767 }
768
769 lwp->stop_pc = pc;
770 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
771 current_thread = saved_thread;
772 return 1;
773 }
774 else if (siginfo.si_code == TRAP_TRACE)
775 {
776 if (debug_threads)
777 {
778 struct thread_info *thr = get_lwp_thread (lwp);
779
780 debug_printf ("CSBB: %s stopped by trace\n",
781 target_pid_to_str (ptid_of (thr)));
782 }
783
784 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
785 }
786 }
787 }
788#else
789 /* We may have just stepped a breakpoint instruction. E.g., in
790 non-stop mode, GDB first tells the thread A to step a range, and
791 then the user inserts a breakpoint inside the range. In that
792 case we need to report the breakpoint PC. */
793 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
794 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
795 {
796 if (debug_threads)
797 {
798 struct thread_info *thr = get_lwp_thread (lwp);
799
800 debug_printf ("CSBB: %s stopped by software breakpoint\n",
801 target_pid_to_str (ptid_of (thr)));
802 }
803
804 /* Back up the PC if necessary. */
805 if (pc != sw_breakpoint_pc)
806 {
807 struct regcache *regcache
808 = get_thread_regcache (current_thread, 1);
809 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
810 }
811
812 lwp->stop_pc = sw_breakpoint_pc;
813 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
814 current_thread = saved_thread;
815 return 1;
816 }
817
818 if (hardware_breakpoint_inserted_here (pc))
819 {
820 if (debug_threads)
821 {
822 struct thread_info *thr = get_lwp_thread (lwp);
823
824 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
825 target_pid_to_str (ptid_of (thr)));
826 }
827
828 lwp->stop_pc = pc;
829 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
830 current_thread = saved_thread;
831 return 1;
832 }
833#endif
834
835 current_thread = saved_thread;
836 return 0;
837}
838
839static struct lwp_info *
840add_lwp (ptid_t ptid)
841{
842 struct lwp_info *lwp;
843
844 lwp = XCNEW (struct lwp_info);
845
846 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
847
848 if (the_low_target.new_thread != NULL)
849 the_low_target.new_thread (lwp);
850
851 lwp->thread = add_thread (ptid, lwp);
852
853 return lwp;
854}
855
856/* Start an inferior process and return its pid.
857 ALLARGS is a vector of program-name and args. */
858
859static int
860linux_create_inferior (char *program, char **allargs)
861{
862 struct lwp_info *new_lwp;
863 int pid;
864 ptid_t ptid;
865 struct cleanup *restore_personality
866 = maybe_disable_address_space_randomization (disable_randomization);
867
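 /* No-MMU targets cannot implement fork, so uClibc-on-noMMU builds
 must use vfork; the child execs almost immediately below, which
 is the one pattern vfork supports. */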
868#if defined(__UCLIBC__) && defined(HAS_NOMMU)
869 pid = vfork ();
870#else
871 pid = fork ();
872#endif
873 if (pid < 0)
874 perror_with_name ("fork");
875
876 if (pid == 0)
877 {
878 close_most_fds ();
879 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
880
881#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
882 signal (__SIGRTMIN + 1, SIG_DFL);
883#endif
884
885 setpgid (0, 0);
886
887 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
888 stdout to stderr so that inferior i/o doesn't corrupt the connection.
889 Also, redirect stdin to /dev/null. */
890 if (remote_connection_is_stdio ())
891 {
892 close (0);
893 open ("/dev/null", O_RDONLY);
894 dup2 (2, 1);
895 if (write (2, "stdin/stdout redirected\n",
896 sizeof ("stdin/stdout redirected\n") - 1) < 0)
897 {
898 /* Errors ignored. */;
899 }
900 }
901
902 execv (program, allargs);
903 if (errno == ENOENT)
904 execvp (program, allargs);
905
906 fprintf (stderr, "Cannot exec %s: %s.\n", program,
907 strerror (errno));
908 fflush (stderr);
909 _exit (0177);
910 }
911
912 do_cleanups (restore_personality);
913
914 linux_add_process (pid, 0);
915
916 ptid = ptid_build (pid, pid, 0);
917 new_lwp = add_lwp (ptid);
918 new_lwp->must_set_ptrace_flags = 1;
919
920 return pid;
921}
922
923/* Implement the post_create_inferior target_ops method. */
924
925static void
926linux_post_create_inferior (void)
927{
928 struct lwp_info *lwp = get_thread_lwp (current_thread);
929
930 linux_arch_setup ();
931
932 if (lwp->must_set_ptrace_flags)
933 {
934 struct process_info *proc = current_process ();
935 int options = linux_low_ptrace_options (proc->attached);
936
937 linux_enable_event_reporting (lwpid_of (current_thread), options);
938 lwp->must_set_ptrace_flags = 0;
939 }
940}
941
942/* Attach to an inferior process. Returns 0 on success, ERRNO on
943 error. */
944
945int
946linux_attach_lwp (ptid_t ptid)
947{
948 struct lwp_info *new_lwp;
949 int lwpid = ptid_get_lwp (ptid);
950
951 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
952 != 0)
953 return errno;
954
955 new_lwp = add_lwp (ptid);
956
957 /* We need to wait for SIGSTOP before being able to make the next
958 ptrace call on this LWP. */
959 new_lwp->must_set_ptrace_flags = 1;
960
961 if (linux_proc_pid_is_stopped (lwpid))
962 {
963 if (debug_threads)
964 debug_printf ("Attached to a stopped process\n");
965
966 /* The process is definitely stopped. It is in a job control
967 stop, unless the kernel predates the TASK_STOPPED /
968 TASK_TRACED distinction, in which case it might be in a
969 ptrace stop. Make sure it is in a ptrace stop; from there we
970 can kill it, signal it, et cetera.
971
972 First make sure there is a pending SIGSTOP. Since we are
973 already attached, the process can not transition from stopped
974 to running without a PTRACE_CONT; so we know this signal will
975 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
976 probably already in the queue (unless this kernel is old
977 enough to use TASK_STOPPED for ptrace stops); but since
978 SIGSTOP is not an RT signal, it can only be queued once. */
979 kill_lwp (lwpid, SIGSTOP);
980
981 /* Finally, resume the stopped process. This will deliver the
982 SIGSTOP (or a higher priority signal, just like normal
983 PTRACE_ATTACH), which we'll catch later on. */
984 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
985 }
986
987 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
988 brings it to a halt.
989
990 There are several cases to consider here:
991
992 1) gdbserver has already attached to the process and is being notified
993 of a new thread that is being created.
994 In this case we should ignore that SIGSTOP and resume the
995 process. This is handled below by setting stop_expected = 1,
996 and the fact that add_thread sets last_resume_kind ==
997 resume_continue.
998
999 2) This is the first thread (the process thread), and we're attaching
1000 to it via attach_inferior.
1001 In this case we want the process thread to stop.
1002 This is handled by having linux_attach set last_resume_kind ==
1003 resume_stop after we return.
1004
1005 If the pid we are attaching to is also the tgid, we attach to and
1006 stop all the existing threads. Otherwise, we attach to pid and
1007 ignore any other threads in the same group as this pid.
1008
1009 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1010 existing threads.
1011 In this case we want the thread to stop.
1012 FIXME: This case is currently not properly handled.
1013 We should wait for the SIGSTOP but don't. Things work apparently
1014 because enough time passes between when we ptrace (ATTACH) and when
1015 gdb makes the next ptrace call on the thread.
1016
1017 On the other hand, if we are currently trying to stop all threads, we
1018 should treat the new thread as if we had sent it a SIGSTOP. This works
1019 because we are guaranteed that the add_lwp call above added us to the
1020 end of the list, and so the new thread has not yet reached
1021 wait_for_sigstop (but will). */
1022 new_lwp->stop_expected = 1;
1023
1024 return 0;
1025}
1026
1027/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1028 already attached. Returns true if a new LWP is found, false
1029 otherwise. */
1030
1031static int
1032attach_proc_task_lwp_callback (ptid_t ptid)
1033{
1034 /* Is this a new thread? */
1035 if (find_thread_ptid (ptid) == NULL)
1036 {
1037 int lwpid = ptid_get_lwp (ptid);
1038 int err;
1039
1040 if (debug_threads)
1041 debug_printf ("Found new lwp %d\n", lwpid);
1042
1043 err = linux_attach_lwp (ptid);
1044
1045 /* Be quiet if we simply raced with the thread exiting. EPERM
1046 is returned if the thread's task still exists, and is marked
1047 as exited or zombie, as well as other conditions, so in that
1048 case, confirm the status in /proc/PID/status. */
1049 if (err == ESRCH
1050 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1051 {
1052 if (debug_threads)
1053 {
1054 debug_printf ("Cannot attach to lwp %d: "
1055 "thread is gone (%d: %s)\n",
1056 lwpid, err, strerror (err));
1057 }
1058 }
1059 else if (err != 0)
1060 {
1061 warning (_("Cannot attach to lwp %d: %s"),
1062 lwpid,
1063 linux_ptrace_attach_fail_reason_string (ptid, err));
1064 }
1065
1066 return 1;
1067 }
1068 return 0;
1069}
1070
1071static void async_file_mark (void);
1072
1073/* Attach to PID. If PID is the tgid, attach to it and all
1074 of its threads. */
1075
1076static int
1077linux_attach (unsigned long pid)
1078{
1079 struct process_info *proc;
1080 struct thread_info *initial_thread;
1081 ptid_t ptid = ptid_build (pid, pid, 0);
1082 int err;
1083
1084 /* Attach to PID. We will check for other threads
1085 soon. */
1086 err = linux_attach_lwp (ptid);
1087 if (err != 0)
1088 error ("Cannot attach to process %ld: %s",
1089 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1090
1091 proc = linux_add_process (pid, 1);
1092
1093 /* Don't ignore the initial SIGSTOP if we just attached to this
1094 process. It will be collected by wait shortly. */
1095 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1096 initial_thread->last_resume_kind = resume_stop;
1097
1098 /* We must attach to every LWP. If /proc is mounted, use that to
1099 find them now. On the one hand, the inferior may be using raw
1100 clone instead of using pthreads. On the other hand, even if it
1101 is using pthreads, GDB may not be connected yet (thread_db needs
1102 to do symbol lookups, through qSymbol). Also, thread_db walks
1103 structures in the inferior's address space to find the list of
1104 threads/LWPs, and those structures may well be corrupted. Note
1105 that once thread_db is loaded, we'll still use it to list threads
1106 and associate pthread info with each LWP. */
1107 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1108
1109 /* GDB will shortly read the xml target description for this
1110 process, to figure out the process' architecture. But the target
1111 description is only filled in when the first process/thread in
1112 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1113 that now, otherwise, if GDB is fast enough, it could read the
1114 target description _before_ that initial stop. */
1115 if (non_stop)
1116 {
1117 struct lwp_info *lwp;
1118 int wstat, lwpid;
1119 ptid_t pid_ptid = pid_to_ptid (pid);
1120
1121 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1122 &wstat, __WALL);
1123 gdb_assert (lwpid > 0);
1124
1125 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1126
1127 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1128 {
1129 lwp->status_pending_p = 1;
1130 lwp->status_pending = wstat;
1131 }
1132
1133 initial_thread->last_resume_kind = resume_continue;
1134
1135 async_file_mark ();
1136
1137 gdb_assert (proc->tdesc != NULL);
1138 }
1139
1140 return 0;
1141}
1142
1143struct counter
1144{
1145 int pid;
1146 int count;
1147};
1148
1149static int
1150second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1151{
1152 struct counter *counter = (struct counter *) args;
1153
1154 if (ptid_get_pid (entry->id) == counter->pid)
1155 {
1156 if (++counter->count > 1)
1157 return 1;
1158 }
1159
1160 return 0;
1161}
1162
1163static int
1164last_thread_of_process_p (int pid)
1165{
1166 struct counter counter = { pid , 0 };
1167
1168 return (find_inferior (&all_threads,
1169 second_thread_of_pid_p, &counter) == NULL);
1170}
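
/* Note on the idiom above: second_thread_of_pid_p returns nonzero as
   soon as a second thread of PID is counted, which makes
   find_inferior stop and return non-NULL. A NULL result therefore
   means at most one thread of the process remains. */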
1171
1172/* Kill LWP. */
1173
1174static void
1175linux_kill_one_lwp (struct lwp_info *lwp)
1176{
1177 struct thread_info *thr = get_lwp_thread (lwp);
1178 int pid = lwpid_of (thr);
1179
1180 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1181 there is no signal context, and ptrace(PTRACE_KILL) (or
1182 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1183 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1184 alternative is to kill with SIGKILL. We only need one SIGKILL
1185 per process, not one for each thread. But since we still support
1186 linuxthreads, and we also support debugging programs using raw
1187 clone without CLONE_THREAD, we send one for each thread. For
1188 years, we used PTRACE_KILL only, so we're being a bit paranoid
1189 about some old kernels where PTRACE_KILL might work better
1190 (dubious if there are any such, but that's why it's paranoia), so
1191 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1192 everywhere. */
1193
1194 errno = 0;
1195 kill_lwp (pid, SIGKILL);
1196 if (debug_threads)
1197 {
1198 int save_errno = errno;
1199
1200 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1201 target_pid_to_str (ptid_of (thr)),
1202 save_errno ? strerror (save_errno) : "OK");
1203 }
1204
1205 errno = 0;
1206 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1207 if (debug_threads)
1208 {
1209 int save_errno = errno;
1210
1211 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1212 target_pid_to_str (ptid_of (thr)),
1213 save_errno ? strerror (save_errno) : "OK");
1214 }
1215}
1216
1217/* Kill LWP and wait for it to die. */
1218
1219static void
1220kill_wait_lwp (struct lwp_info *lwp)
1221{
1222 struct thread_info *thr = get_lwp_thread (lwp);
1223 int pid = ptid_get_pid (ptid_of (thr));
1224 int lwpid = ptid_get_lwp (ptid_of (thr));
1225 int wstat;
1226 int res;
1227
1228 if (debug_threads)
1229 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1230
1231 do
1232 {
1233 linux_kill_one_lwp (lwp);
1234
1235 /* Make sure it died. Notes:
1236
1237 - The loop is most likely unnecessary.
1238
1239 - We don't use linux_wait_for_event as that could delete lwps
1240 while we're iterating over them. We're not interested in
1241 any pending status at this point, only in making sure all
1242 wait status on the kernel side are collected until the
1243 process is reaped.
1244
1245 - We don't use __WALL here as the __WALL emulation relies on
1246 SIGCHLD, and killing a stopped process doesn't generate
1247 one, nor an exit status.
1248 */
1249 res = my_waitpid (lwpid, &wstat, 0);
1250 if (res == -1 && errno == ECHILD)
1251 res = my_waitpid (lwpid, &wstat, __WCLONE);
1252 } while (res > 0 && WIFSTOPPED (wstat));
1253
1254 /* Even if it was stopped, the child may have already disappeared.
1255 E.g., if it was killed by SIGKILL. */
1256 if (res < 0 && errno != ECHILD)
1257 perror_with_name ("kill_wait_lwp");
1258}
1259
1260/* Callback for `find_inferior'. Kills an lwp of a given process,
1261 except the leader. */
1262
1263static int
1264kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1265{
1266 struct thread_info *thread = (struct thread_info *) entry;
1267 struct lwp_info *lwp = get_thread_lwp (thread);
1268 int pid = * (int *) args;
1269
1270 if (ptid_get_pid (entry->id) != pid)
1271 return 0;
1272
1273 /* We avoid killing the first thread here, because of a Linux kernel (at
1274 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1275 the children get a chance to be reaped, it will remain a zombie
1276 forever. */
1277
1278 if (lwpid_of (thread) == pid)
1279 {
1280 if (debug_threads)
1281 debug_printf ("lkop: is last of process %s\n",
1282 target_pid_to_str (entry->id));
1283 return 0;
1284 }
1285
1286 kill_wait_lwp (lwp);
1287 return 0;
1288}
1289
1290static int
1291linux_kill (int pid)
1292{
1293 struct process_info *process;
1294 struct lwp_info *lwp;
1295
1296 process = find_process_pid (pid);
1297 if (process == NULL)
1298 return -1;
1299
1300 /* If we're killing a running inferior, make sure it is stopped
1301 first, as PTRACE_KILL will not work otherwise. */
1302 stop_all_lwps (0, NULL);
1303
1304 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1305
1306 /* See the comment in linux_kill_one_lwp. We did not kill the first
1307 thread in the list, so do so now. */
1308 lwp = find_lwp_pid (pid_to_ptid (pid));
1309
1310 if (lwp == NULL)
1311 {
1312 if (debug_threads)
1313 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1314 pid);
1315 }
1316 else
1317 kill_wait_lwp (lwp);
1318
1319 the_target->mourn (process);
1320
1321 /* Since we presently can only stop all lwps of all processes, we
1322 need to unstop lwps of other processes. */
1323 unstop_all_lwps (0, NULL);
1324 return 0;
1325}
1326
1327/* Get pending signal of THREAD, for detaching purposes. This is the
1328 signal the thread last stopped for, which we need to deliver to the
1329 thread when detaching, otherwise, it'd be suppressed/lost. */
1330
1331static int
1332get_detach_signal (struct thread_info *thread)
1333{
1334 enum gdb_signal signo = GDB_SIGNAL_0;
1335 int status;
1336 struct lwp_info *lp = get_thread_lwp (thread);
1337
1338 if (lp->status_pending_p)
1339 status = lp->status_pending;
1340 else
1341 {
1342 /* If the thread had been suspended by gdbserver, and it stopped
1343 cleanly, then it'll have stopped with SIGSTOP. But we don't
1344 want to deliver that SIGSTOP. */
1345 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1346 || thread->last_status.value.sig == GDB_SIGNAL_0)
1347 return 0;
1348
1349 /* Otherwise, we may need to deliver the signal we
1350 intercepted. */
1351 status = lp->last_status;
1352 }
1353
1354 if (!WIFSTOPPED (status))
1355 {
1356 if (debug_threads)
1357 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1358 target_pid_to_str (ptid_of (thread)));
1359 return 0;
1360 }
1361
1362 /* Extended wait statuses aren't real SIGTRAPs. */
1363 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1364 {
1365 if (debug_threads)
1366 debug_printf ("GPS: lwp %s had stopped with extended "
1367 "status: no pending signal\n",
1368 target_pid_to_str (ptid_of (thread)));
1369 return 0;
1370 }
1371
1372 signo = gdb_signal_from_host (WSTOPSIG (status));
1373
1374 if (program_signals_p && !program_signals[signo])
1375 {
1376 if (debug_threads)
1377 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1378 target_pid_to_str (ptid_of (thread)),
1379 gdb_signal_to_string (signo));
1380 return 0;
1381 }
1382 else if (!program_signals_p
1383 /* If we have no way to know which signals GDB does not
1384 want to have passed to the program, assume
1385 SIGTRAP/SIGINT, which is GDB's default. */
1386 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1387 {
1388 if (debug_threads)
1389 debug_printf ("GPS: lwp %s had signal %s, "
1390 "but we don't know if we should pass it. "
1391 "Default to not.\n",
1392 target_pid_to_str (ptid_of (thread)),
1393 gdb_signal_to_string (signo));
1394 return 0;
1395 }
1396 else
1397 {
1398 if (debug_threads)
1399 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1400 target_pid_to_str (ptid_of (thread)),
1401 gdb_signal_to_string (signo));
1402
1403 return WSTOPSIG (status);
1404 }
1405}
1406
1407static int
1408linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1409{
1410 struct thread_info *thread = (struct thread_info *) entry;
1411 struct lwp_info *lwp = get_thread_lwp (thread);
1412 int pid = * (int *) args;
1413 int sig;
1414
1415 if (ptid_get_pid (entry->id) != pid)
1416 return 0;
1417
1418 /* If there is a pending SIGSTOP, get rid of it. */
1419 if (lwp->stop_expected)
1420 {
1421 if (debug_threads)
1422 debug_printf ("Sending SIGCONT to %s\n",
1423 target_pid_to_str (ptid_of (thread)));
1424
1425 kill_lwp (lwpid_of (thread), SIGCONT);
1426 lwp->stop_expected = 0;
1427 }
1428
1429 /* Flush any pending changes to the process's registers. */
1430 regcache_invalidate_thread (thread);
1431
1432 /* Pass on any pending signal for this thread. */
1433 sig = get_detach_signal (thread);
1434
1435 /* Finally, let it resume. */
1436 if (the_low_target.prepare_to_resume != NULL)
1437 the_low_target.prepare_to_resume (lwp);
1438 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1439 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1440 error (_("Can't detach %s: %s"),
1441 target_pid_to_str (ptid_of (thread)),
1442 strerror (errno));
1443
1444 delete_lwp (lwp);
1445 return 0;
1446}
1447
1448static int
1449linux_detach (int pid)
1450{
1451 struct process_info *process;
1452
1453 process = find_process_pid (pid);
1454 if (process == NULL)
1455 return -1;
1456
1457 /* As there's a step over already in progress, let it finish first,
1458 otherwise nesting a stabilize_threads operation on top gets real
1459 messy. */
1460 complete_ongoing_step_over ();
1461
1462 /* Stop all threads before detaching. First, ptrace requires that
1463 the thread is stopped to successfully detach. Second, thread_db
1464 may need to uninstall thread event breakpoints from memory, which
1465 only works with a stopped process anyway. */
1466 stop_all_lwps (0, NULL);
1467
1468#ifdef USE_THREAD_DB
1469 thread_db_detach (process);
1470#endif
1471
1472 /* Stabilize threads (move out of jump pads). */
1473 stabilize_threads ();
1474
1475 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1476
1477 the_target->mourn (process);
1478
1479 /* Since we presently can only stop all lwps of all processes, we
1480 need to unstop lwps of other processes. */
1481 unstop_all_lwps (0, NULL);
1482 return 0;
1483}
1484
1485/* Remove all LWPs that belong to process PROC from the lwp list. */
1486
1487static int
1488delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1489{
1490 struct thread_info *thread = (struct thread_info *) entry;
1491 struct lwp_info *lwp = get_thread_lwp (thread);
1492 struct process_info *process = (struct process_info *) proc;
1493
1494 if (pid_of (thread) == pid_of (process))
1495 delete_lwp (lwp);
1496
1497 return 0;
1498}
1499
1500static void
1501linux_mourn (struct process_info *process)
1502{
1503 struct process_info_private *priv;
1504
1505#ifdef USE_THREAD_DB
1506 thread_db_mourn (process);
1507#endif
1508
1509 find_inferior (&all_threads, delete_lwp_callback, process);
1510
1511 /* Free all private data. */
1512 priv = process->priv;
1513 free (priv->arch_private);
1514 free (priv);
1515 process->priv = NULL;
1516
1517 remove_process (process);
1518}
1519
1520static void
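/* Implement the join target_ops method. Wait until process PID has
   been reaped, so that no zombie is left behind; ECHILD from waitpid
   means there is nothing left to wait for. */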
1521linux_join (int pid)
1522{
1523 int status, ret;
1524
1525 do {
1526 ret = my_waitpid (pid, &status, 0);
1527 if (WIFEXITED (status) || WIFSIGNALED (status))
1528 break;
1529 } while (ret != -1 || errno != ECHILD);
1530}
1531
1532/* Return nonzero if the given thread is still alive. */
1533static int
1534linux_thread_alive (ptid_t ptid)
1535{
1536 struct lwp_info *lwp = find_lwp_pid (ptid);
1537
1538 /* We assume we always know if a thread exits. If a whole process
1539 exited but we still haven't been able to report it to GDB, we'll
1540 hold on to the last lwp of the dead process. */
1541 if (lwp != NULL)
1542 return !lwp_is_marked_dead (lwp);
1543 else
1544 return 0;
1545}
1546
1547/* Return 1 if this lwp still has an interesting status pending. If
1548 not (e.g., it had stopped for a breakpoint that is gone), return
1549 0. */
1550
1551static int
1552thread_still_has_status_pending_p (struct thread_info *thread)
1553{
1554 struct lwp_info *lp = get_thread_lwp (thread);
1555
1556 if (!lp->status_pending_p)
1557 return 0;
1558
1559 if (thread->last_resume_kind != resume_stop
1560 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1561 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1562 {
1563 struct thread_info *saved_thread;
1564 CORE_ADDR pc;
1565 int discard = 0;
1566
1567 gdb_assert (lp->last_status != 0);
1568
1569 pc = get_pc (lp);
1570
1571 saved_thread = current_thread;
1572 current_thread = thread;
1573
1574 if (pc != lp->stop_pc)
1575 {
1576 if (debug_threads)
1577 debug_printf ("PC of %ld changed\n",
1578 lwpid_of (thread));
1579 discard = 1;
1580 }
1581
1582#if !USE_SIGTRAP_SIGINFO
1583 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1584 && !(*the_low_target.breakpoint_at) (pc))
1585 {
1586 if (debug_threads)
1587 debug_printf ("previous SW breakpoint of %ld gone\n",
1588 lwpid_of (thread));
1589 discard = 1;
1590 }
1591 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1592 && !hardware_breakpoint_inserted_here (pc))
1593 {
1594 if (debug_threads)
1595 debug_printf ("previous HW breakpoint of %ld gone\n",
1596 lwpid_of (thread));
1597 discard = 1;
1598 }
1599#endif
1600
1601 current_thread = saved_thread;
1602
1603 if (discard)
1604 {
1605 if (debug_threads)
1606 debug_printf ("discarding pending breakpoint status\n");
1607 lp->status_pending_p = 0;
1608 return 0;
1609 }
1610 }
1611
1612 return 1;
1613}
1614
1615/* Returns true if LWP is resumed from the client's perspective. */
1616
1617static int
1618lwp_resumed (struct lwp_info *lwp)
1619{
1620 struct thread_info *thread = get_lwp_thread (lwp);
1621
1622 if (thread->last_resume_kind != resume_stop)
1623 return 1;
1624
1625 /* Did gdb send us a `vCont;t', but we haven't reported the
1626 corresponding stop to gdb yet? If so, the thread is still
1627 resumed/running from gdb's perspective. */
1628 if (thread->last_resume_kind == resume_stop
1629 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1630 return 1;
1631
1632 return 0;
1633}
1634
1635/* Return 1 if this lwp has an interesting status pending. */
1636static int
1637status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1638{
1639 struct thread_info *thread = (struct thread_info *) entry;
1640 struct lwp_info *lp = get_thread_lwp (thread);
1641 ptid_t ptid = * (ptid_t *) arg;
1642
1643 /* Check if we're only interested in events from a specific process
1644 or a specific LWP. */
1645 if (!ptid_match (ptid_of (thread), ptid))
1646 return 0;
1647
1648 if (!lwp_resumed (lp))
1649 return 0;
1650
1651 if (lp->status_pending_p
1652 && !thread_still_has_status_pending_p (thread))
1653 {
1654 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1655 return 0;
1656 }
1657
1658 return lp->status_pending_p;
1659}
1660
1661static int
1662same_lwp (struct inferior_list_entry *entry, void *data)
1663{
1664 ptid_t ptid = *(ptid_t *) data;
1665 int lwp;
1666
1667 if (ptid_get_lwp (ptid) != 0)
1668 lwp = ptid_get_lwp (ptid);
1669 else
1670 lwp = ptid_get_pid (ptid);
1671
1672 if (ptid_get_lwp (entry->id) == lwp)
1673 return 1;
1674
1675 return 0;
1676}
1677
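/* Return the LWP matching PTID: same_lwp above compares against
   PTID's lwp field, falling back to its pid field when the lwp field
   is zero. Return NULL if no such LWP is known. */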
1678struct lwp_info *
1679find_lwp_pid (ptid_t ptid)
1680{
1681 struct inferior_list_entry *thread
1682 = find_inferior (&all_threads, same_lwp, &ptid);
1683
1684 if (thread == NULL)
1685 return NULL;
1686
1687 return get_thread_lwp ((struct thread_info *) thread);
1688}
1689
1690/* Return the number of known LWPs in the tgid given by PID. */
1691
1692static int
1693num_lwps (int pid)
1694{
1695 struct inferior_list_entry *inf, *tmp;
1696 int count = 0;
1697
1698 ALL_INFERIORS (&all_threads, inf, tmp)
1699 {
1700 if (ptid_get_pid (inf->id) == pid)
1701 count++;
1702 }
1703
1704 return count;
1705}
1706
1707/* The arguments passed to iterate_over_lwps. */
1708
1709struct iterate_over_lwps_args
1710{
1711 /* The FILTER argument passed to iterate_over_lwps. */
1712 ptid_t filter;
1713
1714 /* The CALLBACK argument passed to iterate_over_lwps. */
1715 iterate_over_lwps_ftype *callback;
1716
1717 /* The DATA argument passed to iterate_over_lwps. */
1718 void *data;
1719};
1720
1721/* Callback for find_inferior used by iterate_over_lwps to filter
1722 calls to the callback supplied to that function. Returning a
1723 nonzero value causes find_inferior to stop iterating and return
1724 the current inferior_list_entry. Returning zero indicates that
1725 find_inferior should continue iterating. */
1726
1727static int
1728iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1729{
1730 struct iterate_over_lwps_args *args
1731 = (struct iterate_over_lwps_args *) args_p;
1732
1733 if (ptid_match (entry->id, args->filter))
1734 {
1735 struct thread_info *thr = (struct thread_info *) entry;
1736 struct lwp_info *lwp = get_thread_lwp (thr);
1737
1738 return (*args->callback) (lwp, args->data);
1739 }
1740
1741 return 0;
1742}
1743
1744/* See nat/linux-nat.h. */
1745
1746struct lwp_info *
1747iterate_over_lwps (ptid_t filter,
1748 iterate_over_lwps_ftype callback,
1749 void *data)
1750{
1751 struct iterate_over_lwps_args args = {filter, callback, data};
1752 struct inferior_list_entry *entry;
1753
1754 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1755 if (entry == NULL)
1756 return NULL;
1757
1758 return get_thread_lwp ((struct thread_info *) entry);
1759}
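
/* Usage sketch (illustrative; the callback is hypothetical):

     static int
     count_one (struct lwp_info *lwp, void *data)
     {
       (*(int *) data)++;
       return 0;   ... zero means keep iterating
     }

     int n = 0;
     iterate_over_lwps (minus_one_ptid, count_one, &n);

   A nonzero return from the callback stops the walk and makes
   iterate_over_lwps return that LWP.  */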
1760
1761/* Detect zombie thread group leaders, and "exit" them. We can't reap
1762 their exits until all other threads in the group have exited. */
1763
1764static void
1765check_zombie_leaders (void)
1766{
1767 struct process_info *proc, *tmp;
1768
1769 ALL_PROCESSES (proc, tmp)
1770 {
1771 pid_t leader_pid = pid_of (proc);
1772 struct lwp_info *leader_lp;
1773
1774 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1775
1776 if (debug_threads)
1777 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1778 "num_lwps=%d, zombie=%d\n",
1779 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1780 linux_proc_pid_is_zombie (leader_pid));
1781
1782 if (leader_lp != NULL && !leader_lp->stopped
1783 /* Check if there are other threads in the group, as we may
1784 have raced with the inferior simply exiting. */
1785 && !last_thread_of_process_p (leader_pid)
1786 && linux_proc_pid_is_zombie (leader_pid))
1787 {
1788 /* A leader zombie can mean one of two things:
1789
1790 - It exited, and there's an exit status pending
1791 available, or only the leader exited (not the whole
1792 program). In the latter case, we can't waitpid the
1793 leader's exit status until all other threads are gone.
1794
1795 - There are 3 or more threads in the group, and a thread
1796 other than the leader exec'd. On an exec, the Linux
1797 kernel destroys all other threads (except the execing
1798 one) in the thread group, and resets the execing thread's
1799 tid to the tgid. No exit notification is sent for the
1800 execing thread -- from the ptracer's perspective, it
1801 appears as though the execing thread just vanishes.
1802 Until we reap all other threads except the leader and the
1803 execing thread, the leader will be zombie, and the
1804 execing thread will be in `D (disc sleep)'. As soon as
1805 all other threads are reaped, the execing thread changes
1806 its tid to the tgid, and the previous (zombie) leader
1807 vanishes, giving place to the "new" leader. We could try
1808 distinguishing the exit and exec cases, by waiting once
1809 more, and seeing if something comes out, but it doesn't
1810 sound useful. The previous leader _does_ go away, and
1811 we'll re-add the new one once we see the exec event
1812 (which is just the same as what would happen if the
1813 previous leader did exit voluntarily before some other
1814 thread execs). */
1815
1816 if (debug_threads)
1817 fprintf (stderr,
1818 "CZL: Thread group leader %d zombie "
1819 "(it exited, or another thread execd).\n",
1820 leader_pid);
1821
1822 delete_lwp (leader_lp);
1823 }
1824 }
1825}
1826
1827/* Callback for `find_inferior'. Returns the first LWP that is not
1828 stopped. ARG is a PTID filter. */
1829
1830static int
1831not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1832{
1833 struct thread_info *thr = (struct thread_info *) entry;
1834 struct lwp_info *lwp;
1835 ptid_t filter = *(ptid_t *) arg;
1836
1837 if (!ptid_match (ptid_of (thr), filter))
1838 return 0;
1839
1840 lwp = get_thread_lwp (thr);
1841 if (!lwp->stopped)
1842 return 1;
1843
1844 return 0;
1845}
1846
1847/* Increment LWP's suspend count. */
1848
1849static void
1850lwp_suspended_inc (struct lwp_info *lwp)
1851{
1852 lwp->suspended++;
1853
1854 if (debug_threads && lwp->suspended > 4)
1855 {
1856 struct thread_info *thread = get_lwp_thread (lwp);
1857
1858 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1859 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1860 }
1861}
1862
1863/* Decrement LWP's suspend count. */
1864
1865static void
1866lwp_suspended_decr (struct lwp_info *lwp)
1867{
1868 lwp->suspended--;
1869
1870 if (lwp->suspended < 0)
1871 {
1872 struct thread_info *thread = get_lwp_thread (lwp);
1873
1874 internal_error (__FILE__, __LINE__,
1875 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1876 lwp->suspended);
1877 }
1878}
1879
1880/* This function should only be called if the LWP got a SIGTRAP.
1881
1882 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1883 event was handled, 0 otherwise. */
1884
1885static int
1886handle_tracepoints (struct lwp_info *lwp)
1887{
1888 struct thread_info *tinfo = get_lwp_thread (lwp);
1889 int tpoint_related_event = 0;
1890
1891 gdb_assert (lwp->suspended == 0);
1892
1893 /* If this tracepoint hit causes a tracing stop, we'll immediately
1894 uninsert tracepoints. To do this, we temporarily pause all
1895 threads, unpatch away, and then unpause threads. We need to make
1896 sure the unpausing doesn't resume LWP too. */
1897 lwp_suspended_inc (lwp);
1898
1899 /* And we need to be sure that any all-threads-stopping doesn't try
1900 to move threads out of the jump pads, as it could deadlock the
1901 inferior (LWP could be in the jump pad, maybe even holding the
1902 lock.) */
1903
1904 /* Do any necessary step collect actions. */
1905 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1906
1907 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1908
1909 /* See if we just hit a tracepoint and do its main collect
1910 actions. */
1911 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1912
1913 lwp_suspended_decr (lwp);
1914
1915 gdb_assert (lwp->suspended == 0);
1916 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1917
1918 if (tpoint_related_event)
1919 {
1920 if (debug_threads)
1921 debug_printf ("got a tracepoint event\n");
1922 return 1;
1923 }
1924
1925 return 0;
1926}
1927
1928/* Convenience wrapper. Returns true if LWP is presently collecting a
1929 fast tracepoint. */
1930
1931static int
1932linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1933 struct fast_tpoint_collect_status *status)
1934{
1935 CORE_ADDR thread_area;
1936 struct thread_info *thread = get_lwp_thread (lwp);
1937
1938 if (the_low_target.get_thread_area == NULL)
1939 return 0;
1940
1941 /* Get the thread area address. This is used to recognize which
1942 thread is which when tracing with the in-process agent library.
1943 We don't read anything from the address, and treat it as opaque;
1944 it's the address itself that we assume is unique per-thread. */
1945 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1946 return 0;
1947
1948 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1949}
1950
1951 /* The reason we resume in the caller is that we want to be able
1952 to pass lwp->status_pending as WSTAT, and we need to clear
1953 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1954 refuses to resume. */
1955
1956static int
1957maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1958{
1959 struct thread_info *saved_thread;
1960
1961 saved_thread = current_thread;
1962 current_thread = get_lwp_thread (lwp);
1963
1964 if ((wstat == NULL
1965 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1966 && supports_fast_tracepoints ()
1967 && agent_loaded_p ())
1968 {
1969 struct fast_tpoint_collect_status status;
1970 int r;
1971
1972 if (debug_threads)
1973 debug_printf ("Checking whether LWP %ld needs to move out of the "
1974 "jump pad.\n",
1975 lwpid_of (current_thread));
1976
1977 r = linux_fast_tracepoint_collecting (lwp, &status);
1978
1979 if (wstat == NULL
1980 || (WSTOPSIG (*wstat) != SIGILL
1981 && WSTOPSIG (*wstat) != SIGFPE
1982 && WSTOPSIG (*wstat) != SIGSEGV
1983 && WSTOPSIG (*wstat) != SIGBUS))
1984 {
1985 lwp->collecting_fast_tracepoint = r;
1986
1987 if (r != 0)
1988 {
1989 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1990 {
1991 /* Haven't executed the original instruction yet.
1992 Set breakpoint there, and wait till it's hit,
1993 then single-step until exiting the jump pad. */
1994 lwp->exit_jump_pad_bkpt
1995 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1996 }
1997
1998 if (debug_threads)
1999 debug_printf ("Checking whether LWP %ld needs to move out of "
2000 "the jump pad...it does\n",
2001 lwpid_of (current_thread));
2002 current_thread = saved_thread;
2003
2004 return 1;
2005 }
2006 }
2007 else
2008 {
2009 /* If we get a synchronous signal while collecting, *and*
2010 while executing the (relocated) original instruction,
2011 reset the PC to point at the tpoint address, before
2012 reporting to GDB. Otherwise, it's an IPA lib bug: just
2013 report the signal to GDB, and pray for the best. */
2014
2015 lwp->collecting_fast_tracepoint = 0;
2016
2017 if (r != 0
2018 && (status.adjusted_insn_addr <= lwp->stop_pc
2019 && lwp->stop_pc < status.adjusted_insn_addr_end))
2020 {
2021 siginfo_t info;
2022 struct regcache *regcache;
2023
2024 /* The si_addr on a few signals references the address
2025 of the faulting instruction. Adjust that as
2026 well. */
2027 if ((WSTOPSIG (*wstat) == SIGILL
2028 || WSTOPSIG (*wstat) == SIGFPE
2029 || WSTOPSIG (*wstat) == SIGBUS
2030 || WSTOPSIG (*wstat) == SIGSEGV)
2031 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2032 (PTRACE_TYPE_ARG3) 0, &info) == 0
2033 /* Final check just to make sure we don't clobber
2034 the siginfo of non-kernel-sent signals. */
2035 && (uintptr_t) info.si_addr == lwp->stop_pc)
2036 {
2037 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2038 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2039 (PTRACE_TYPE_ARG3) 0, &info);
2040 }
2041
2042 regcache = get_thread_regcache (current_thread, 1);
2043 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2044 lwp->stop_pc = status.tpoint_addr;
2045
2046 /* Cancel any fast tracepoint lock this thread was
2047 holding. */
2048 force_unlock_trace_buffer ();
2049 }
2050
2051 if (lwp->exit_jump_pad_bkpt != NULL)
2052 {
2053 if (debug_threads)
2054 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2055 "stopping all threads momentarily.\n");
2056
2057 stop_all_lwps (1, lwp);
2058
2059 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2060 lwp->exit_jump_pad_bkpt = NULL;
2061
2062 unstop_all_lwps (1, lwp);
2063
2064 gdb_assert (lwp->suspended >= 0);
2065 }
2066 }
2067 }
2068
2069 if (debug_threads)
2070 debug_printf ("Checking whether LWP %ld needs to move out of the "
2071 "jump pad...no\n",
2072 lwpid_of (current_thread));
2073
2074 current_thread = saved_thread;
2075 return 0;
2076}
2077
2078/* Enqueue one signal in the "signals to report later when out of the
2079 jump pad" list. */
2080
2081static void
2082enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2083{
2084 struct pending_signals *p_sig;
2085 struct thread_info *thread = get_lwp_thread (lwp);
2086
2087 if (debug_threads)
2088 debug_printf ("Deferring signal %d for LWP %ld.\n",
2089 WSTOPSIG (*wstat), lwpid_of (thread));
2090
2091 if (debug_threads)
2092 {
2093 struct pending_signals *sig;
2094
2095 for (sig = lwp->pending_signals_to_report;
2096 sig != NULL;
2097 sig = sig->prev)
2098 debug_printf (" Already queued %d\n",
2099 sig->signal);
2100
2101 debug_printf (" (no more currently queued signals)\n");
2102 }
2103
2104 /* Don't enqueue non-RT signals if they are already in the deferred
2105 queue. (SIGSTOP is the easiest signal to see ending up here
2106 twice.) */
2107 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2108 {
2109 struct pending_signals *sig;
2110
2111 for (sig = lwp->pending_signals_to_report;
2112 sig != NULL;
2113 sig = sig->prev)
2114 {
2115 if (sig->signal == WSTOPSIG (*wstat))
2116 {
2117 if (debug_threads)
2118 debug_printf ("Not requeuing already queued non-RT signal %d"
2119 " for LWP %ld\n",
2120 sig->signal,
2121 lwpid_of (thread));
2122 return;
2123 }
2124 }
2125 }
2126
2127 p_sig = XCNEW (struct pending_signals);
2128 p_sig->prev = lwp->pending_signals_to_report;
2129 p_sig->signal = WSTOPSIG (*wstat);
2130
2131 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2132 &p_sig->info);
2133
2134 lwp->pending_signals_to_report = p_sig;
2135}
2136
2137/* Dequeue one signal from the "signals to report later when out of
2138 the jump pad" list. */
2139
2140static int
2141dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2142{
2143 struct thread_info *thread = get_lwp_thread (lwp);
2144
2145 if (lwp->pending_signals_to_report != NULL)
2146 {
2147 struct pending_signals **p_sig;
2148
2149 p_sig = &lwp->pending_signals_to_report;
2150 while ((*p_sig)->prev != NULL)
2151 p_sig = &(*p_sig)->prev;
2152
2153 *wstat = W_STOPCODE ((*p_sig)->signal);
2154 if ((*p_sig)->info.si_signo != 0)
2155 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2156 &(*p_sig)->info);
2157 free (*p_sig);
2158 *p_sig = NULL;
2159
2160 if (debug_threads)
2161 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2162 WSTOPSIG (*wstat), lwpid_of (thread));
2163
2164 if (debug_threads)
2165 {
2166 struct pending_signals *sig;
2167
2168 for (sig = lwp->pending_signals_to_report;
2169 sig != NULL;
2170 sig = sig->prev)
2171 debug_printf (" Still queued %d\n",
2172 sig->signal);
2173
2174 debug_printf (" (no more queued signals)\n");
2175 }
2176
2177 return 1;
2178 }
2179
2180 return 0;
2181}
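
/* Note the queue discipline implemented by the pair above:
   enqueue_one_deferred_signal pushes new entries at the head of the
   PREV-linked list, while dequeue_one_deferred_signal walks down to
   the tail before popping, so deferred signals are re-reported in
   arrival order (FIFO).  The synthesized wait status relies on
   W_STOPCODE; with the usual glibc encoding these identities hold
   (illustrative, for any signal number SIG):

     W_STOPCODE (SIG)              == ((SIG) << 8 | 0x7f)
     WIFSTOPPED (W_STOPCODE (SIG)) != 0
     WSTOPSIG (W_STOPCODE (SIG))   == SIG  */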
2182
2183/* Fetch the possibly triggered data watchpoint info and store it in
2184 CHILD.
2185
2186 On some archs, like x86, that use debug registers to set
2187 watchpoints, it's possible that the way to know which watched
2188 address trapped is to check the register that is used to select
2189 which address to watch. The problem is, between setting the watchpoint
2190 and reading back which data address trapped, the user may change
2191 the set of watchpoints, and, as a consequence, GDB changes the
2192 debug registers in the inferior. To avoid reading back a stale
2193 stopped-data-address when that happens, we cache in CHILD the fact
2194 that a watchpoint trapped, and the corresponding data address, as
2195 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2196 registers meanwhile, we have the cached data we can rely on. */
2197
2198static int
2199check_stopped_by_watchpoint (struct lwp_info *child)
2200{
2201 if (the_low_target.stopped_by_watchpoint != NULL)
2202 {
2203 struct thread_info *saved_thread;
2204
2205 saved_thread = current_thread;
2206 current_thread = get_lwp_thread (child);
2207
2208 if (the_low_target.stopped_by_watchpoint ())
2209 {
2210 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2211
2212 if (the_low_target.stopped_data_address != NULL)
2213 child->stopped_data_address
2214 = the_low_target.stopped_data_address ();
2215 else
2216 child->stopped_data_address = 0;
2217 }
2218
2219 current_thread = saved_thread;
2220 }
2221
2222 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2223}
2224
2225/* Return the ptrace options that we want to try to enable. */
2226
2227static int
2228linux_low_ptrace_options (int attached)
2229{
2230 int options = 0;
2231
2232 if (!attached)
2233 options |= PTRACE_O_EXITKILL;
2234
2235 if (report_fork_events)
2236 options |= PTRACE_O_TRACEFORK;
2237
2238 if (report_vfork_events)
2239 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2240
2241 if (report_exec_events)
2242 options |= PTRACE_O_TRACEEXEC;
2243
2244 return options;
2245}
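
/* The mask built above reaches the kernel through
   linux_enable_event_reporting (used by linux_low_filter_event just
   below), which ultimately boils down to a PTRACE_SETOPTIONS
   request -- roughly the following, with error handling omitted
   (a sketch, not the actual implementation):

     ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (uintptr_t) options);  */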
2246
2247/* Do low-level handling of the event, and check if we should go on
2248 and pass it to caller code. Return the affected lwp if we are, or
2249 NULL otherwise. */
2250
2251static struct lwp_info *
2252linux_low_filter_event (int lwpid, int wstat)
2253{
2254 struct lwp_info *child;
2255 struct thread_info *thread;
2256 int have_stop_pc = 0;
2257
2258 child = find_lwp_pid (pid_to_ptid (lwpid));
2259
2260 /* Check for stop events reported by a process we didn't already
2261 know about - anything not already in our LWP list.
2262
2263 If we're expecting to receive stopped processes after
2264 fork, vfork, and clone events, then we'll just add the
2265 new one to our list and go back to waiting for the event
2266 to be reported - the stopped process might be returned
2267 from waitpid before or after the event is.
2268
2269 But note the case of a non-leader thread exec'ing after the
2270 leader has exited and gone from our lists (because
2271 check_zombie_leaders deleted it). The non-leader thread
2272 changes its tid to the tgid. */
2273
2274 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2275 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2276 {
2277 ptid_t child_ptid;
2278
2279 /* A multi-thread exec after we had seen the leader exiting. */
2280 if (debug_threads)
2281 {
2282 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2283 "after exec.\n", lwpid);
2284 }
2285
2286 child_ptid = ptid_build (lwpid, lwpid, 0);
2287 child = add_lwp (child_ptid);
2288 child->stopped = 1;
2289 current_thread = child->thread;
2290 }
2291
2292 /* If we didn't find a process, one of two things presumably happened:
2293 - A process we started and then detached from has exited. Ignore it.
2294 - A process we are controlling has forked and the new child's stop
2295 was reported to us by the kernel. Save its PID. */
2296 if (child == NULL && WIFSTOPPED (wstat))
2297 {
2298 add_to_pid_list (&stopped_pids, lwpid, wstat);
2299 return NULL;
2300 }
2301 else if (child == NULL)
2302 return NULL;
2303
2304 thread = get_lwp_thread (child);
2305
2306 child->stopped = 1;
2307
2308 child->last_status = wstat;
2309
2310 /* Check if the thread has exited. */
2311 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2312 {
2313 if (debug_threads)
2314 debug_printf ("LLFE: %d exited.\n", lwpid);
2315 /* If there is at least one more LWP, then the exit signal was
2316 not the end of the debugged application and should be
2317 ignored, unless GDB wants to hear about thread exits. */
2318 if (report_thread_events
2319 || last_thread_of_process_p (pid_of (thread)))
2320 {
2321 /* Since events are serialized to GDB core, we can't report
2322 this one right now. Leave the status pending for
2323 the next time we're able to report it. */
2324 mark_lwp_dead (child, wstat);
2325 return child;
2326 }
2327 else
2328 {
2329 delete_lwp (child);
2330 return NULL;
2331 }
2332 }
2333
2334 gdb_assert (WIFSTOPPED (wstat));
2335
2336 if (WIFSTOPPED (wstat))
2337 {
2338 struct process_info *proc;
2339
2340 /* Architecture-specific setup after inferior is running. */
2341 proc = find_process_pid (pid_of (thread));
2342 if (proc->tdesc == NULL)
2343 {
2344 if (proc->attached)
2345 {
2346 /* This needs to happen after we have attached to the
2347 inferior and it is stopped for the first time, but
2348 before we access any inferior registers. */
2349 linux_arch_setup_thread (thread);
2350 }
2351 else
2352 {
2353 /* The process is started, but GDBserver will do
2354 architecture-specific setup after the program stops at
2355 the first instruction. */
2356 child->status_pending_p = 1;
2357 child->status_pending = wstat;
2358 return child;
2359 }
2360 }
2361 }
2362
2363 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2364 {
2365 struct process_info *proc = find_process_pid (pid_of (thread));
2366 int options = linux_low_ptrace_options (proc->attached);
2367
2368 linux_enable_event_reporting (lwpid, options);
2369 child->must_set_ptrace_flags = 0;
2370 }
2371
2372 /* Be careful to not overwrite stop_pc until
2373 check_stopped_by_breakpoint is called. */
2374 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2375 && linux_is_extended_waitstatus (wstat))
2376 {
2377 child->stop_pc = get_pc (child);
2378 if (handle_extended_wait (&child, wstat))
2379 {
2380 /* The event has been handled, so just return without
2381 reporting it. */
2382 return NULL;
2383 }
2384 }
2385
2386 /* Check first whether this was a SW/HW breakpoint before checking
2387 watchpoints, because at least s390 can't tell the data address of
2388 hardware watchpoint hits, and returns stopped-by-watchpoint as
2389 long as there's a watchpoint set. */
2390 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2391 {
2392 if (check_stopped_by_breakpoint (child))
2393 have_stop_pc = 1;
2394 }
2395
2396 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2397 or hardware watchpoint. Check which is which if we got
2398 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2399 stepped an instruction that triggered a watchpoint. In that
2400 case, on some architectures (such as x86), instead of
2401 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2402 the debug registers separately. */
2403 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2404 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2405 check_stopped_by_watchpoint (child);
2406
2407 if (!have_stop_pc)
2408 child->stop_pc = get_pc (child);
2409
2410 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2411 && child->stop_expected)
2412 {
2413 if (debug_threads)
2414 debug_printf ("Expected stop.\n");
2415 child->stop_expected = 0;
2416
2417 if (thread->last_resume_kind == resume_stop)
2418 {
2419 /* We want to report the stop to the core. Treat the
2420 SIGSTOP as a normal event. */
2421 if (debug_threads)
2422 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2423 target_pid_to_str (ptid_of (thread)));
2424 }
2425 else if (stopping_threads != NOT_STOPPING_THREADS)
2426 {
2427 /* Stopping threads. We don't want this SIGSTOP to end up
2428 pending. */
2429 if (debug_threads)
2430 debug_printf ("LLW: SIGSTOP caught for %s "
2431 "while stopping threads.\n",
2432 target_pid_to_str (ptid_of (thread)));
2433 return NULL;
2434 }
2435 else
2436 {
2437 /* This is a delayed SIGSTOP. Filter out the event. */
2438 if (debug_threads)
2439 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2440 child->stepping ? "step" : "continue",
2441 target_pid_to_str (ptid_of (thread)));
2442
2443 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2444 return NULL;
2445 }
2446 }
2447
2448 child->status_pending_p = 1;
2449 child->status_pending = wstat;
2450 return child;
2451}
2452
2453/* Resume LWPs that are currently stopped without any pending status
2454 to report, but are resumed from the core's perspective. */
2455
2456static void
2457resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2458{
2459 struct thread_info *thread = (struct thread_info *) entry;
2460 struct lwp_info *lp = get_thread_lwp (thread);
2461
2462 if (lp->stopped
2463 && !lp->suspended
2464 && !lp->status_pending_p
2465 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2466 {
2467 int step = thread->last_resume_kind == resume_step;
2468
2469 if (debug_threads)
2470 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2471 target_pid_to_str (ptid_of (thread)),
2472 paddress (lp->stop_pc),
2473 step);
2474
2475 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2476 }
2477}
2478
2479/* Wait for an event from child(ren) WAIT_PTID, and return any that
2480 match FILTER_PTID (leaving others pending). The PTIDs can be:
2481 minus_one_ptid, to specify any child; a pid PTID, specifying all
2482 lwps of a thread group; or a PTID representing a single lwp. Store
2483 the stop status through the status pointer WSTAT. OPTIONS is
2484 passed to the waitpid call. Return 0 if no event was found and
2485 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2486 were found. Return the PID of the stopped child otherwise. */
2487
2488static int
2489linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2490 int *wstatp, int options)
2491{
2492 struct thread_info *event_thread;
2493 struct lwp_info *event_child, *requested_child;
2494 sigset_t block_mask, prev_mask;
2495
2496 retry:
2497 /* N.B. event_thread points to the thread_info struct that contains
2498 event_child. Keep them in sync. */
2499 event_thread = NULL;
2500 event_child = NULL;
2501 requested_child = NULL;
2502
2503 /* Check for a lwp with a pending status. */
2504
2505 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2506 {
2507 event_thread = (struct thread_info *)
2508 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2509 if (event_thread != NULL)
2510 event_child = get_thread_lwp (event_thread);
2511 if (debug_threads && event_thread)
2512 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2513 }
2514 else if (!ptid_equal (filter_ptid, null_ptid))
2515 {
2516 requested_child = find_lwp_pid (filter_ptid);
2517
2518 if (stopping_threads == NOT_STOPPING_THREADS
2519 && requested_child->status_pending_p
2520 && requested_child->collecting_fast_tracepoint)
2521 {
2522 enqueue_one_deferred_signal (requested_child,
2523 &requested_child->status_pending);
2524 requested_child->status_pending_p = 0;
2525 requested_child->status_pending = 0;
2526 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2527 }
2528
2529 if (requested_child->suspended
2530 && requested_child->status_pending_p)
2531 {
2532 internal_error (__FILE__, __LINE__,
2533 "requesting an event out of a"
2534 " suspended child?");
2535 }
2536
2537 if (requested_child->status_pending_p)
2538 {
2539 event_child = requested_child;
2540 event_thread = get_lwp_thread (event_child);
2541 }
2542 }
2543
2544 if (event_child != NULL)
2545 {
2546 if (debug_threads)
2547 debug_printf ("Got an event from pending child %ld (%04x)\n",
2548 lwpid_of (event_thread), event_child->status_pending);
2549 *wstatp = event_child->status_pending;
2550 event_child->status_pending_p = 0;
2551 event_child->status_pending = 0;
2552 current_thread = event_thread;
2553 return lwpid_of (event_thread);
2554 }
2555
2556 /* But if we don't find a pending event, we'll have to wait.
2557
2558 We only enter this loop if no process has a pending wait status.
2559 Thus any action taken in response to a wait status inside this
2560 loop is responding as soon as we detect the status, not after any
2561 pending events. */
2562
2563 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2564 all signals while here. */
2565 sigfillset (&block_mask);
2566 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2567
2568 /* Always pull all events out of the kernel. We'll randomly select
2569 an event LWP out of all that have events, to prevent
2570 starvation. */
2571 while (event_child == NULL)
2572 {
2573 pid_t ret = 0;
2574
2575 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2576 quirks:
2577
2578 - If the thread group leader exits while other threads in the
2579 thread group still exist, waitpid(TGID, ...) hangs. That
2580 waitpid won't return an exit status until the other threads
2581 in the group are reaped.
2582
2583 - When a non-leader thread execs, that thread just vanishes
2584 without reporting an exit (so we'd hang if we waited for it
2585 explicitly in that case). The exec event is reported to
2586 the TGID pid. */
2587 errno = 0;
2588 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2589
2590 if (debug_threads)
2591 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2592 ret, errno ? strerror (errno) : "ERRNO-OK");
2593
2594 if (ret > 0)
2595 {
2596 if (debug_threads)
2597 {
2598 debug_printf ("LLW: waitpid %ld received %s\n",
2599 (long) ret, status_to_str (*wstatp));
2600 }
2601
2602 /* Filter all events. IOW, leave all events pending. We'll
2603 randomly select an event LWP out of all that have events
2604 below. */
2605 linux_low_filter_event (ret, *wstatp);
2606 /* Retry until nothing comes out of waitpid. A single
2607 SIGCHLD can indicate more than one child stopped. */
2608 continue;
2609 }
2610
2611 /* Now that we've pulled all events out of the kernel, resume
2612 LWPs that don't have an interesting event to report. */
2613 if (stopping_threads == NOT_STOPPING_THREADS)
2614 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2615
2616 /* ... and find an LWP with a status to report to the core, if
2617 any. */
2618 event_thread = (struct thread_info *)
2619 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2620 if (event_thread != NULL)
2621 {
2622 event_child = get_thread_lwp (event_thread);
2623 *wstatp = event_child->status_pending;
2624 event_child->status_pending_p = 0;
2625 event_child->status_pending = 0;
2626 break;
2627 }
2628
2629 /* Check for zombie thread group leaders. Those can't be reaped
2630 until all other threads in the thread group are. */
2631 check_zombie_leaders ();
2632
2633 /* If there are no resumed children left in the set of LWPs we
2634 want to wait for, bail. We can't just block in
2635 waitpid/sigsuspend, because lwps might have been left stopped
2636 in trace-stop state, and we'd be stuck forever waiting for
2637 their status to change (which would only happen if we resumed
2638 them). Even if WNOHANG is set, this return code is preferred
2639 over 0 (below), as it is more detailed. */
2640 if ((find_inferior (&all_threads,
2641 not_stopped_callback,
2642 &wait_ptid) == NULL))
2643 {
2644 if (debug_threads)
2645 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2646 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2647 return -1;
2648 }
2649
2650 /* No interesting event to report to the caller. */
2651 if ((options & WNOHANG))
2652 {
2653 if (debug_threads)
2654 debug_printf ("WNOHANG set, no event found\n");
2655
2656 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2657 return 0;
2658 }
2659
2660 /* Block until we get an event reported with SIGCHLD. */
2661 if (debug_threads)
2662 debug_printf ("sigsuspend'ing\n");
2663
2664 sigsuspend (&prev_mask);
2665 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2666 goto retry;
2667 }
2668
2669 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2670
2671 current_thread = event_thread;
2672
2673 return lwpid_of (event_thread);
2674}
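
/* The loop above is an instance of the standard race-free SIGCHLD
   pattern: block signals, drain waitpid with WNOHANG, and only then
   sigsuspend with the pre-block mask.  Stripped of the LWP
   bookkeeping (and of the no-resumed-children bailout), the
   skeleton looks like this minimal sketch; sketch_wait_for_child_event
   is not a real helper, and ATTRIBUTE_UNUSED is assumed visible:  */

static pid_t ATTRIBUTE_UNUSED
sketch_wait_for_child_event (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  pid_t ret;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while (1)
    {
      /* Pull a pending event out of the kernel, never blocking.  */
      ret = my_waitpid (-1, wstatp, __WALL | WNOHANG);
      if (ret > 0)
	break;

      /* Nothing yet.  Sleep until a SIGCHLD arrives; the pre-block
	 mask is restored atomically for the duration of the sleep,
	 closing the wakeup race.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return ret;
}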
2675
2676/* Wait for an event from child(ren) PTID. PTIDs can be:
2677 minus_one_ptid, to specify any child; a pid PTID, specifying all
2678 lwps of a thread group; or a PTID representing a single lwp. Store
2679 the stop status through the status pointer WSTAT. OPTIONS is
2680 passed to the waitpid call. Return 0 if no event was found and
2681 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2682 were found. Return the PID of the stopped child otherwise. */
2683
2684static int
2685linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2686{
2687 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2688}
2689
2690 /* Count the LWPs that have had events. */
2691
2692static int
2693count_events_callback (struct inferior_list_entry *entry, void *data)
2694{
2695 struct thread_info *thread = (struct thread_info *) entry;
2696 struct lwp_info *lp = get_thread_lwp (thread);
2697 int *count = (int *) data;
2698
2699 gdb_assert (count != NULL);
2700
2701 /* Count only resumed LWPs that have an event pending. */
2702 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2703 && lp->status_pending_p)
2704 (*count)++;
2705
2706 return 0;
2707}
2708
2709/* Select the LWP (if any) that is currently being single-stepped. */
2710
2711static int
2712select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2713{
2714 struct thread_info *thread = (struct thread_info *) entry;
2715 struct lwp_info *lp = get_thread_lwp (thread);
2716
2717 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2718 && thread->last_resume_kind == resume_step
2719 && lp->status_pending_p)
2720 return 1;
2721 else
2722 return 0;
2723}
2724
2725/* Select the Nth LWP that has had an event. */
2726
2727static int
2728select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2729{
2730 struct thread_info *thread = (struct thread_info *) entry;
2731 struct lwp_info *lp = get_thread_lwp (thread);
2732 int *selector = (int *) data;
2733
2734 gdb_assert (selector != NULL);
2735
2736 /* Select only resumed LWPs that have an event pending. */
2737 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2738 && lp->status_pending_p)
2739 if ((*selector)-- == 0)
2740 return 1;
2741
2742 return 0;
2743}
2744
2745/* Select one LWP out of those that have events pending. */
2746
2747static void
2748select_event_lwp (struct lwp_info **orig_lp)
2749{
2750 int num_events = 0;
2751 int random_selector;
2752 struct thread_info *event_thread = NULL;
2753
2754 /* In all-stop, give preference to the LWP that is being
2755 single-stepped. There will be at most one, and it's the LWP that
2756 the core is most interested in. If we didn't do this, then we'd
2757 have to handle pending step SIGTRAPs somehow in case the core
2758 later continues the previously-stepped thread, otherwise we'd
2759 report the pending SIGTRAP, and the core, not having stepped the
2760 thread, wouldn't understand what the trap was for, and therefore
2761 would report it to the user as a random signal. */
2762 if (!non_stop)
2763 {
2764 event_thread
2765 = (struct thread_info *) find_inferior (&all_threads,
2766 select_singlestep_lwp_callback,
2767 NULL);
2768 if (event_thread != NULL)
2769 {
2770 if (debug_threads)
2771 debug_printf ("SEL: Select single-step %s\n",
2772 target_pid_to_str (ptid_of (event_thread)));
2773 }
2774 }
2775 if (event_thread == NULL)
2776 {
2777 /* No single-stepping LWP. Select one at random, out of those
2778 which have had events. */
2779
2780 /* First see how many events we have. */
2781 find_inferior (&all_threads, count_events_callback, &num_events);
2782 gdb_assert (num_events > 0);
2783
2784 /* Now randomly pick a LWP out of those that have had
2785 events. */
2786 random_selector = (int)
2787 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2788
2789 if (debug_threads && num_events > 1)
2790 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2791 num_events, random_selector);
2792
2793 event_thread
2794 = (struct thread_info *) find_inferior (&all_threads,
2795 select_event_lwp_callback,
2796 &random_selector);
2797 }
2798
2799 if (event_thread != NULL)
2800 {
2801 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2802
2803 /* Switch the event LWP. */
2804 *orig_lp = event_lp;
2805 }
2806}
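
/* A worked example of the selector formula used above: with
   num_events == 3, the expression

     (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0))

   maps rand ()'s uniform range [0, RAND_MAX] onto [0.0, 3.0), so
   the cast yields 0, 1 or 2 with (near) equal probability.  Unlike
   rand () % 3, this avoids modulo bias.  */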
2807
2808/* Decrement the suspend count of an LWP. */
2809
2810static int
2811unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2812{
2813 struct thread_info *thread = (struct thread_info *) entry;
2814 struct lwp_info *lwp = get_thread_lwp (thread);
2815
2816 /* Ignore EXCEPT. */
2817 if (lwp == except)
2818 return 0;
2819
2820 lwp_suspended_decr (lwp);
2821 return 0;
2822}
2823
2824 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2825 non-NULL. */
2826
2827static void
2828unsuspend_all_lwps (struct lwp_info *except)
2829{
2830 find_inferior (&all_threads, unsuspend_one_lwp, except);
2831}
2832
2833static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2834static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2835 void *data);
2836static int lwp_running (struct inferior_list_entry *entry, void *data);
2837static ptid_t linux_wait_1 (ptid_t ptid,
2838 struct target_waitstatus *ourstatus,
2839 int target_options);
2840
2841/* Stabilize threads (move out of jump pads).
2842
2843 If a thread is midway collecting a fast tracepoint, we need to
2844 finish the collection and move it out of the jump pad before
2845 reporting the signal.
2846
2847 This avoids recursion while collecting (when a signal arrives
2848 midway, and the signal handler itself collects), which would trash
2849 the trace buffer. In case the user set a breakpoint in a signal
2850 handler, this avoids the backtrace showing the jump pad, etc.
2851 Most importantly, there are certain things we can't do safely if
2852 threads are stopped in a jump pad (or in its callees). For
2853 example:
2854
2855 - starting a new trace run. A thread still collecting the
2856 previous run could trash the trace buffer when resumed. The trace
2857 buffer control structures would have been reset but the thread had
2858 no way to tell. The thread could even be midway through
2859 memcpy'ing to the buffer, which would mean that when resumed, it
2860 would clobber the trace buffer that had been set for a new run.
2861
2862 - we can't rewrite/reuse the jump pads for new tracepoints
2863 safely. Say you do tstart while a thread is stopped midway while
2864 collecting. When the thread is later resumed, it finishes the
2865 collection, and returns to the jump pad, to execute the original
2866 instruction that was under the tracepoint jump at the time the
2867 older run had been started. If the jump pad had been rewritten
2868 since for something else in the new run, the thread would now
2869 execute the wrong / random instructions. */
2870
2871static void
2872linux_stabilize_threads (void)
2873{
2874 struct thread_info *saved_thread;
2875 struct thread_info *thread_stuck;
2876
2877 thread_stuck
2878 = (struct thread_info *) find_inferior (&all_threads,
2879 stuck_in_jump_pad_callback,
2880 NULL);
2881 if (thread_stuck != NULL)
2882 {
2883 if (debug_threads)
2884 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2885 lwpid_of (thread_stuck));
2886 return;
2887 }
2888
2889 saved_thread = current_thread;
2890
2891 stabilizing_threads = 1;
2892
2893 /* Kick 'em all. */
2894 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2895
2896 /* Loop until all are stopped out of the jump pads. */
2897 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2898 {
2899 struct target_waitstatus ourstatus;
2900 struct lwp_info *lwp;
2901 int wstat;
2902
2903 /* Note that we go through the full wait event loop. While
2904 moving threads out of the jump pad, we need to be able to step
2905 over internal breakpoints and such. */
2906 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2907
2908 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2909 {
2910 lwp = get_thread_lwp (current_thread);
2911
2912 /* Lock it. */
2913 lwp_suspended_inc (lwp);
2914
2915 if (ourstatus.value.sig != GDB_SIGNAL_0
2916 || current_thread->last_resume_kind == resume_stop)
2917 {
2918 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2919 enqueue_one_deferred_signal (lwp, &wstat);
2920 }
2921 }
2922 }
2923
2924 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2925
2926 stabilizing_threads = 0;
2927
2928 current_thread = saved_thread;
2929
2930 if (debug_threads)
2931 {
2932 thread_stuck
2933 = (struct thread_info *) find_inferior (&all_threads,
2934 stuck_in_jump_pad_callback,
2935 NULL);
2936 if (thread_stuck != NULL)
2937 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2938 lwpid_of (thread_stuck));
2939 }
2940}
2941
2942/* Convenience function that is called when the kernel reports an
2943 event that is not passed out to GDB. */
2944
2945static ptid_t
2946ignore_event (struct target_waitstatus *ourstatus)
2947{
2948 /* If we got an event, there may still be others, as a single
2949 SIGCHLD can indicate more than one child stopped. This forces
2950 another target_wait call. */
2951 async_file_mark ();
2952
2953 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2954 return null_ptid;
2955}
2956
2957/* Convenience function that is called when the kernel reports an exit
2958 event. This decides whether to report the event to GDB as a
2959 process exit event, a thread exit event, or to suppress the
2960 event. */
2961
2962static ptid_t
2963filter_exit_event (struct lwp_info *event_child,
2964 struct target_waitstatus *ourstatus)
2965{
2966 struct thread_info *thread = get_lwp_thread (event_child);
2967 ptid_t ptid = ptid_of (thread);
2968
2969 if (!last_thread_of_process_p (pid_of (thread)))
2970 {
2971 if (report_thread_events)
2972 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2973 else
2974 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2975
2976 delete_lwp (event_child);
2977 }
2978 return ptid;
2979}
2980
2981/* Wait for process, returns status. */
2982
2983static ptid_t
2984linux_wait_1 (ptid_t ptid,
2985 struct target_waitstatus *ourstatus, int target_options)
2986{
2987 int w;
2988 struct lwp_info *event_child;
2989 int options;
2990 int pid;
2991 int step_over_finished;
2992 int bp_explains_trap;
2993 int maybe_internal_trap;
2994 int report_to_gdb;
2995 int trace_event;
2996 int in_step_range;
2997 int any_resumed;
2998
2999 if (debug_threads)
3000 {
3001 debug_enter ();
3002 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3003 }
3004
3005 /* Translate generic target options into linux options. */
3006 options = __WALL;
3007 if (target_options & TARGET_WNOHANG)
3008 options |= WNOHANG;
3009
3010 bp_explains_trap = 0;
3011 trace_event = 0;
3012 in_step_range = 0;
3013 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3014
3015 /* Find a resumed LWP, if any. */
3016 if (find_inferior (&all_threads,
3017 status_pending_p_callback,
3018 &minus_one_ptid) != NULL)
3019 any_resumed = 1;
3020 else if ((find_inferior (&all_threads,
3021 not_stopped_callback,
3022 &minus_one_ptid) != NULL))
3023 any_resumed = 1;
3024 else
3025 any_resumed = 0;
3026
3027 if (ptid_equal (step_over_bkpt, null_ptid))
3028 pid = linux_wait_for_event (ptid, &w, options);
3029 else
3030 {
3031 if (debug_threads)
3032 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3033 target_pid_to_str (step_over_bkpt));
3034 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3035 }
3036
3037 if (pid == 0 || (pid == -1 && !any_resumed))
3038 {
3039 gdb_assert (target_options & TARGET_WNOHANG);
3040
3041 if (debug_threads)
3042 {
3043 debug_printf ("linux_wait_1 ret = null_ptid, "
3044 "TARGET_WAITKIND_IGNORE\n");
3045 debug_exit ();
3046 }
3047
3048 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3049 return null_ptid;
3050 }
3051 else if (pid == -1)
3052 {
3053 if (debug_threads)
3054 {
3055 debug_printf ("linux_wait_1 ret = null_ptid, "
3056 "TARGET_WAITKIND_NO_RESUMED\n");
3057 debug_exit ();
3058 }
3059
3060 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3061 return null_ptid;
3062 }
3063
3064 event_child = get_thread_lwp (current_thread);
3065
3066 /* linux_wait_for_event only returns an exit status for the last
3067 child of a process. Report it. */
3068 if (WIFEXITED (w) || WIFSIGNALED (w))
3069 {
3070 if (WIFEXITED (w))
3071 {
3072 ourstatus->kind = TARGET_WAITKIND_EXITED;
3073 ourstatus->value.integer = WEXITSTATUS (w);
3074
3075 if (debug_threads)
3076 {
3077 debug_printf ("linux_wait_1 ret = %s, exited with "
3078 "retcode %d\n",
3079 target_pid_to_str (ptid_of (current_thread)),
3080 WEXITSTATUS (w));
3081 debug_exit ();
3082 }
3083 }
3084 else
3085 {
3086 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3087 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3088
3089 if (debug_threads)
3090 {
3091 debug_printf ("linux_wait_1 ret = %s, terminated with "
3092 "signal %d\n",
3093 target_pid_to_str (ptid_of (current_thread)),
3094 WTERMSIG (w));
3095 debug_exit ();
3096 }
3097 }
3098
3099 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3100 return filter_exit_event (event_child, ourstatus);
3101
3102 return ptid_of (current_thread);
3103 }
3104
3105 /* If step-over executes a breakpoint instruction, in the case of a
3106 hardware single step it means a gdb/gdbserver breakpoint had been
3107 planted on top of a permanent breakpoint, in the case of a software
3108 single step it may just mean that gdbserver hit the reinsert breakpoint.
3109 The PC has been adjusted by check_stopped_by_breakpoint to point at
3110 the breakpoint address.
3111 So, in the case of a hardware single step, advance the PC manually
3112 past the breakpoint, and in the case of a software single step,
3113 advance only if it's not the reinsert_breakpoint we are hitting.
3114 This keeps the program from trapping on a permanent breakpoint
3115 forever. */
3116 if (!ptid_equal (step_over_bkpt, null_ptid)
3117 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3118 && (event_child->stepping
3119 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3120 {
3121 int increment_pc = 0;
3122 int breakpoint_kind = 0;
3123 CORE_ADDR stop_pc = event_child->stop_pc;
3124
3125 breakpoint_kind
3126 = the_target->breakpoint_kind_from_current_state (&stop_pc);
3127 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3128
3129 if (debug_threads)
3130 {
3131 debug_printf ("step-over for %s executed software breakpoint\n",
3132 target_pid_to_str (ptid_of (current_thread)));
3133 }
3134
3135 if (increment_pc != 0)
3136 {
3137 struct regcache *regcache
3138 = get_thread_regcache (current_thread, 1);
3139
3140 event_child->stop_pc += increment_pc;
3141 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3142
3143 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3144 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3145 }
3146 }
3147
3148 /* If this event was not handled before, and is not a SIGTRAP, we
3149 report it. SIGILL and SIGSEGV are also treated as traps in case
3150 a breakpoint is inserted at the current PC. If this target does
3151 not support internal breakpoints at all, we also report the
3152 SIGTRAP without further processing; it's of no concern to us. */
3153 maybe_internal_trap
3154 = (supports_breakpoints ()
3155 && (WSTOPSIG (w) == SIGTRAP
3156 || ((WSTOPSIG (w) == SIGILL
3157 || WSTOPSIG (w) == SIGSEGV)
3158 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3159
3160 if (maybe_internal_trap)
3161 {
3162 /* Handle anything that requires bookkeeping before deciding to
3163 report the event or continue waiting. */
3164
3165 /* First check if we can explain the SIGTRAP with an internal
3166 breakpoint, or if we should possibly report the event to GDB.
3167 Do this before anything that may remove or insert a
3168 breakpoint. */
3169 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3170
3171 /* We have a SIGTRAP, possibly a step-over dance has just
3172 finished. If so, tweak the state machine accordingly,
3173 reinsert breakpoints and delete any reinsert (software
3174 single-step) breakpoints. */
3175 step_over_finished = finish_step_over (event_child);
3176
3177 /* Now invoke the callbacks of any internal breakpoints there. */
3178 check_breakpoints (event_child->stop_pc);
3179
3180 /* Handle tracepoint data collecting. This may overflow the
3181 trace buffer, and cause a tracing stop, removing
3182 breakpoints. */
3183 trace_event = handle_tracepoints (event_child);
3184
3185 if (bp_explains_trap)
3186 {
3187 /* If we stepped or ran into an internal breakpoint, we've
3188 already handled it. So next time we resume (from this
3189 PC), we should step over it. */
3190 if (debug_threads)
3191 debug_printf ("Hit a gdbserver breakpoint.\n");
3192
3193 if (breakpoint_here (event_child->stop_pc))
3194 event_child->need_step_over = 1;
3195 }
3196 }
3197 else
3198 {
3199 /* We have some other signal, possibly a step-over dance was in
3200 progress, and it should be cancelled too. */
3201 step_over_finished = finish_step_over (event_child);
3202 }
3203
3204 /* We have all the data we need. Either report the event to GDB, or
3205 resume threads and keep waiting for more. */
3206
3207 /* If we're collecting a fast tracepoint, finish the collection and
3208 move out of the jump pad before delivering a signal. See
3209 linux_stabilize_threads. */
3210
3211 if (WIFSTOPPED (w)
3212 && WSTOPSIG (w) != SIGTRAP
3213 && supports_fast_tracepoints ()
3214 && agent_loaded_p ())
3215 {
3216 if (debug_threads)
3217 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3218 "to defer or adjust it.\n",
3219 WSTOPSIG (w), lwpid_of (current_thread));
3220
3221 /* Allow debugging the jump pad itself. */
3222 if (current_thread->last_resume_kind != resume_step
3223 && maybe_move_out_of_jump_pad (event_child, &w))
3224 {
3225 enqueue_one_deferred_signal (event_child, &w);
3226
3227 if (debug_threads)
3228 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3229 WSTOPSIG (w), lwpid_of (current_thread));
3230
3231 linux_resume_one_lwp (event_child, 0, 0, NULL);
3232
3233 return ignore_event (ourstatus);
3234 }
3235 }
3236
3237 if (event_child->collecting_fast_tracepoint)
3238 {
3239 if (debug_threads)
3240 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3241 "Check if we're already there.\n",
3242 lwpid_of (current_thread),
3243 event_child->collecting_fast_tracepoint);
3244
3245 trace_event = 1;
3246
3247 event_child->collecting_fast_tracepoint
3248 = linux_fast_tracepoint_collecting (event_child, NULL);
3249
3250 if (event_child->collecting_fast_tracepoint != 1)
3251 {
3252 /* No longer need this breakpoint. */
3253 if (event_child->exit_jump_pad_bkpt != NULL)
3254 {
3255 if (debug_threads)
3256 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3257 "stopping all threads momentarily.\n");
3258
3259 /* Other running threads could hit this breakpoint.
3260 We don't handle moribund locations like GDB does,
3261 instead we always pause all threads when removing
3262 breakpoints, so that any step-over or
3263 decr_pc_after_break adjustment is always taken
3264 care of while the breakpoint is still
3265 inserted. */
3266 stop_all_lwps (1, event_child);
3267
3268 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3269 event_child->exit_jump_pad_bkpt = NULL;
3270
3271 unstop_all_lwps (1, event_child);
3272
3273 gdb_assert (event_child->suspended >= 0);
3274 }
3275 }
3276
3277 if (event_child->collecting_fast_tracepoint == 0)
3278 {
3279 if (debug_threads)
3280 debug_printf ("fast tracepoint finished "
3281 "collecting successfully.\n");
3282
3283 /* We may have a deferred signal to report. */
3284 if (dequeue_one_deferred_signal (event_child, &w))
3285 {
3286 if (debug_threads)
3287 debug_printf ("dequeued one signal.\n");
3288 }
3289 else
3290 {
3291 if (debug_threads)
3292 debug_printf ("no deferred signals.\n");
3293
3294 if (stabilizing_threads)
3295 {
3296 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3297 ourstatus->value.sig = GDB_SIGNAL_0;
3298
3299 if (debug_threads)
3300 {
3301 debug_printf ("linux_wait_1 ret = %s, stopped "
3302 "while stabilizing threads\n",
3303 target_pid_to_str (ptid_of (current_thread)));
3304 debug_exit ();
3305 }
3306
3307 return ptid_of (current_thread);
3308 }
3309 }
3310 }
3311 }
3312
3313 /* Check whether GDB would be interested in this event. */
3314
3315 /* If GDB is not interested in this signal, don't stop other
3316 threads, and don't report it to GDB. Just resume the inferior
3317 right away. We do this for threading-related signals as well as
3318 any that GDB specifically requested we ignore. But never ignore
3319 SIGSTOP if we sent it ourselves, and do not ignore signals when
3320 stepping - they may require special handling to skip the signal
3321 handler. Also never ignore signals that could be caused by a
3322 breakpoint. */
3323 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3324 thread library? */
3325 if (WIFSTOPPED (w)
3326 && current_thread->last_resume_kind != resume_step
3327 && (
3328#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3329 (current_process ()->priv->thread_db != NULL
3330 && (WSTOPSIG (w) == __SIGRTMIN
3331 || WSTOPSIG (w) == __SIGRTMIN + 1))
3332 ||
3333#endif
3334 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3335 && !(WSTOPSIG (w) == SIGSTOP
3336 && current_thread->last_resume_kind == resume_stop)
3337 && !linux_wstatus_maybe_breakpoint (w))))
3338 {
3339 siginfo_t info, *info_p;
3340
3341 if (debug_threads)
3342 debug_printf ("Ignored signal %d for LWP %ld.\n",
3343 WSTOPSIG (w), lwpid_of (current_thread));
3344
3345 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3346 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3347 info_p = &info;
3348 else
3349 info_p = NULL;
3350
3351 if (step_over_finished)
3352 {
3353 /* We cancelled this thread's step-over above. We still
3354 need to unsuspend all other LWPs, and set them back
3355 running again while the signal handler runs. */
3356 unsuspend_all_lwps (event_child);
3357
3358 /* Enqueue the pending signal info so that proceed_all_lwps
3359 doesn't lose it. */
3360 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3361
3362 proceed_all_lwps ();
3363 }
3364 else
3365 {
3366 linux_resume_one_lwp (event_child, event_child->stepping,
3367 WSTOPSIG (w), info_p);
3368 }
3369 return ignore_event (ourstatus);
3370 }
3371
3372 /* Note that all addresses are always "out of the step range" when
3373 there's no range to begin with. */
3374 in_step_range = lwp_in_step_range (event_child);
3375
3376 /* If GDB wanted this thread to single step, and the thread is out
3377 of the step range, we always want to report the SIGTRAP, and let
3378 GDB handle it. Watchpoints should always be reported. So should
3379 signals we can't explain. A SIGTRAP we can't explain could be a
3380 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3381 we do, we'll be able to handle GDB breakpoints on top of internal
3382 breakpoints, by handling the internal breakpoint and still
3383 reporting the event to GDB. If we don't, we're out of luck: GDB
3384 won't see the breakpoint hit. If we see a single-step event but
3385 the thread should be continuing, don't pass the trap to gdb.
3386 That indicates that we had previously finished a single-step but
3387 left the single-step pending -- see
3388 complete_ongoing_step_over. */
3389 report_to_gdb = (!maybe_internal_trap
3390 || (current_thread->last_resume_kind == resume_step
3391 && !in_step_range)
3392 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3393 || (!in_step_range
3394 && !bp_explains_trap
3395 && !trace_event
3396 && !step_over_finished
3397 && !(current_thread->last_resume_kind == resume_continue
3398 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3399 || (gdb_breakpoint_here (event_child->stop_pc)
3400 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3401 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3402 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3403
3404 run_breakpoint_commands (event_child->stop_pc);
3405
3406 /* We found no reason GDB would want us to stop. We either hit one
3407 of our own breakpoints, or finished an internal step GDB
3408 shouldn't know about. */
3409 if (!report_to_gdb)
3410 {
3411 if (debug_threads)
3412 {
3413 if (bp_explains_trap)
3414 debug_printf ("Hit a gdbserver breakpoint.\n");
3415 if (step_over_finished)
3416 debug_printf ("Step-over finished.\n");
3417 if (trace_event)
3418 debug_printf ("Tracepoint event.\n");
3419 if (lwp_in_step_range (event_child))
3420 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3421 paddress (event_child->stop_pc),
3422 paddress (event_child->step_range_start),
3423 paddress (event_child->step_range_end));
3424 }
3425
3426 /* We're not reporting this breakpoint to GDB, so apply the
3427 decr_pc_after_break adjustment to the inferior's regcache
3428 ourselves. */
3429
3430 if (the_low_target.set_pc != NULL)
3431 {
3432 struct regcache *regcache
3433 = get_thread_regcache (current_thread, 1);
3434 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3435 }
3436
3437 /* We may have finished stepping over a breakpoint. If so,
3438 we've stopped and suspended all LWPs momentarily except the
3439 stepping one. This is where we resume them all again. We're
3440 going to keep waiting, so use proceed, which handles stepping
3441 over the next breakpoint. */
3442 if (debug_threads)
3443 debug_printf ("proceeding all threads.\n");
3444
3445 if (step_over_finished)
3446 unsuspend_all_lwps (event_child);
3447
3448 proceed_all_lwps ();
3449 return ignore_event (ourstatus);
3450 }
3451
3452 if (debug_threads)
3453 {
3454 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3455 {
3456 char *str;
3457
3458 str = target_waitstatus_to_string (&event_child->waitstatus);
3459 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3460 lwpid_of (get_lwp_thread (event_child)), str);
3461 xfree (str);
3462 }
3463 if (current_thread->last_resume_kind == resume_step)
3464 {
3465 if (event_child->step_range_start == event_child->step_range_end)
3466 debug_printf ("GDB wanted to single-step, reporting event.\n");
3467 else if (!lwp_in_step_range (event_child))
3468 debug_printf ("Out of step range, reporting event.\n");
3469 }
3470 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3471 debug_printf ("Stopped by watchpoint.\n");
3472 else if (gdb_breakpoint_here (event_child->stop_pc))
3473 debug_printf ("Stopped by GDB breakpoint.\n");
3475 debug_printf ("Hit a non-gdbserver trap event.\n");
3476 }
3477
3478 /* Alright, we're going to report a stop. */
3479
3480 if (!stabilizing_threads)
3481 {
3482 /* In all-stop, stop all threads. */
3483 if (!non_stop)
3484 stop_all_lwps (0, NULL);
3485
3486 /* If we're not waiting for a specific LWP, choose an event LWP
3487 from among those that have had events. Giving equal priority
3488 to all LWPs that have had events helps prevent
3489 starvation. */
3490 if (ptid_equal (ptid, minus_one_ptid))
3491 {
3492 event_child->status_pending_p = 1;
3493 event_child->status_pending = w;
3494
3495 select_event_lwp (&event_child);
3496
3497 /* current_thread and event_child must stay in sync. */
3498 current_thread = get_lwp_thread (event_child);
3499
3500 event_child->status_pending_p = 0;
3501 w = event_child->status_pending;
3502 }
3503
3504 if (step_over_finished)
3505 {
3506 if (!non_stop)
3507 {
3508 /* If we were doing a step-over, all other threads but
3509 the stepping one had been paused in start_step_over,
3510 with their suspend counts incremented. We don't want
3511 to do a full unstop/unpause, because we're in
3512 all-stop mode (so we want threads stopped), but we
3513 still need to unsuspend the other threads, to
3514 decrement their `suspended' count back. */
3515 unsuspend_all_lwps (event_child);
3516 }
3517 else
3518 {
3519 /* If we just finished a step-over, then all threads had
3520 been momentarily paused. In all-stop, that's fine,
3521 we want threads stopped by now anyway. In non-stop,
3522 we need to re-resume threads that GDB wanted to be
3523 running. */
3524 unstop_all_lwps (1, event_child);
3525 }
3526 }
3527
3528 /* Stabilize threads (move out of jump pads). */
3529 if (!non_stop)
3530 stabilize_threads ();
3531 }
3532 else
3533 {
3534 /* If we just finished a step-over, then all threads had been
3535 momentarily paused. In all-stop, that's fine, we want
3536 threads stopped by now anyway. In non-stop, we need to
3537 re-resume threads that GDB wanted to be running. */
3538 if (step_over_finished)
3539 unstop_all_lwps (1, event_child);
3540 }
3541
3542 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3543 {
3544 /* If the reported event is an exit, fork, vfork or exec, let
3545 GDB know. */
3546 *ourstatus = event_child->waitstatus;
3547 /* Clear the event lwp's waitstatus since we handled it already. */
3548 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3549 }
3550 else
3551 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3552
3553 /* Now that we've selected our final event LWP, un-adjust its PC if
3554 it was a software breakpoint, and the client doesn't know we can
3555 adjust the breakpoint ourselves. */
3556 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3557 && !swbreak_feature)
3558 {
3559 int decr_pc = the_low_target.decr_pc_after_break;
3560
3561 if (decr_pc != 0)
3562 {
3563 struct regcache *regcache
3564 = get_thread_regcache (current_thread, 1);
3565 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3566 }
3567 }
3568
3569 if (current_thread->last_resume_kind == resume_stop
3570 && WSTOPSIG (w) == SIGSTOP)
3571 {
3572 /* A thread that has been requested to stop by GDB with vCont;t
3573 stopped cleanly, so report it as SIG0. The use of
3574 SIGSTOP is an implementation detail. */
3575 ourstatus->value.sig = GDB_SIGNAL_0;
3576 }
3577 else if (current_thread->last_resume_kind == resume_stop
3578 && WSTOPSIG (w) != SIGSTOP)
3579 {
3580 /* A thread that has been requested to stop by GDB with vCont;t,
3581 but it stopped for some other reason. */
3582 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3583 }
3584 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3585 {
3586 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3587 }
3588
3589 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3590
3591 if (debug_threads)
3592 {
3593 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3594 target_pid_to_str (ptid_of (current_thread)),
3595 ourstatus->kind, ourstatus->value.sig);
3596 debug_exit ();
3597 }
3598
3599 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3600 return filter_exit_event (event_child, ourstatus);
3601
3602 return ptid_of (current_thread);
3603}
3604
3605/* Get rid of any pending event in the pipe. */
3606static void
3607async_file_flush (void)
3608{
3609 int ret;
3610 char buf;
3611
3612 do
3613 ret = read (linux_event_pipe[0], &buf, 1);
3614 while (ret >= 0 || (ret == -1 && errno == EINTR));
3615}
3616
3617/* Put something in the pipe, so the event loop wakes up. */
3618static void
3619async_file_mark (void)
3620{
3621 int ret;
3622
3623 async_file_flush ();
3624
3625 do
3626 ret = write (linux_event_pipe[1], "+", 1);
3627 while (ret == 0 || (ret == -1 && errno == EINTR));
3628
3629 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3630 be awakened anyway. */
3631}
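
/* The two helpers above implement the classic self-pipe trick: the
   event loop waits on linux_event_pipe[0], and a byte written to
   linux_event_pipe[1] wakes it up.  The EAGAIN remark assumes both
   pipe ends were made non-blocking at setup time, roughly like this
   (a sketch; the actual initialization lives in this file's async
   support code):

     pipe (linux_event_pipe);
     fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
     fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);  */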
3632
3633static ptid_t
3634linux_wait (ptid_t ptid,
3635 struct target_waitstatus *ourstatus, int target_options)
3636{
3637 ptid_t event_ptid;
3638
3639 /* Flush the async file first. */
3640 if (target_is_async_p ())
3641 async_file_flush ();
3642
3643 do
3644 {
3645 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3646 }
3647 while ((target_options & TARGET_WNOHANG) == 0
3648 && ptid_equal (event_ptid, null_ptid)
3649 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3650
3651 /* If at least one stop was reported, there may be more. A single
3652 SIGCHLD can signal more than one child stop. */
3653 if (target_is_async_p ()
3654 && (target_options & TARGET_WNOHANG) != 0
3655 && !ptid_equal (event_ptid, null_ptid))
3656 async_file_mark ();
3657
3658 return event_ptid;
3659}
3660
3661/* Send a signal to an LWP. */
3662
3663static int
3664kill_lwp (unsigned long lwpid, int signo)
3665{
3666 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3667 fails, then we are not using nptl threads and we should be using kill. */
3668
3669#ifdef __NR_tkill
3670 {
3671 static int tkill_failed;
3672
3673 if (!tkill_failed)
3674 {
3675 int ret;
3676
3677 errno = 0;
3678 ret = syscall (__NR_tkill, lwpid, signo);
3679 if (errno != ENOSYS)
3680 return ret;
3681 tkill_failed = 1;
3682 }
3683 }
3684#endif
3685
3686 return kill (lwpid, signo);
3687}
3688
3689void
3690linux_stop_lwp (struct lwp_info *lwp)
3691{
3692 send_sigstop (lwp);
3693}
3694
3695static void
3696send_sigstop (struct lwp_info *lwp)
3697{
3698 int pid;
3699
3700 pid = lwpid_of (get_lwp_thread (lwp));
3701
3702 /* If we already have a pending stop signal for this process, don't
3703 send another. */
3704 if (lwp->stop_expected)
3705 {
3706 if (debug_threads)
3707 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3708
3709 return;
3710 }
3711
3712 if (debug_threads)
3713 debug_printf ("Sending sigstop to lwp %d\n", pid);
3714
3715 lwp->stop_expected = 1;
3716 kill_lwp (pid, SIGSTOP);
3717}
3718
3719static int
3720send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3721{
3722 struct thread_info *thread = (struct thread_info *) entry;
3723 struct lwp_info *lwp = get_thread_lwp (thread);
3724
3725 /* Ignore EXCEPT. */
3726 if (lwp == except)
3727 return 0;
3728
3729 if (lwp->stopped)
3730 return 0;
3731
3732 send_sigstop (lwp);
3733 return 0;
3734}
3735
/* Increment the suspend count of an LWP, and stop it if it's not
   stopped yet.  */
3738static int
3739suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3740 void *except)
3741{
3742 struct thread_info *thread = (struct thread_info *) entry;
3743 struct lwp_info *lwp = get_thread_lwp (thread);
3744
3745 /* Ignore EXCEPT. */
3746 if (lwp == except)
3747 return 0;
3748
3749 lwp_suspended_inc (lwp);
3750
3751 return send_sigstop_callback (entry, except);
3752}
3753
3754static void
3755mark_lwp_dead (struct lwp_info *lwp, int wstat)
3756{
3757 /* Store the exit status for later. */
3758 lwp->status_pending_p = 1;
3759 lwp->status_pending = wstat;
3760
3761 /* Store in waitstatus as well, as there's nothing else to process
3762 for this event. */
3763 if (WIFEXITED (wstat))
3764 {
3765 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3766 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3767 }
3768 else if (WIFSIGNALED (wstat))
3769 {
3770 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3771 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3772 }
3773
3774 /* Prevent trying to stop it. */
3775 lwp->stopped = 1;
3776
3777 /* No further stops are expected from a dead lwp. */
3778 lwp->stop_expected = 0;
3779}
3780
3781/* Return true if LWP has exited already, and has a pending exit event
3782 to report to GDB. */
3783
3784static int
3785lwp_is_marked_dead (struct lwp_info *lwp)
3786{
3787 return (lwp->status_pending_p
3788 && (WIFEXITED (lwp->status_pending)
3789 || WIFSIGNALED (lwp->status_pending)));
3790}
3791
3792/* Wait for all children to stop for the SIGSTOPs we just queued. */
3793
3794static void
3795wait_for_sigstop (void)
3796{
3797 struct thread_info *saved_thread;
3798 ptid_t saved_tid;
3799 int wstat;
3800 int ret;
3801
3802 saved_thread = current_thread;
3803 if (saved_thread != NULL)
3804 saved_tid = saved_thread->entry.id;
3805 else
3806 saved_tid = null_ptid; /* avoid bogus unused warning */
3807
3808 if (debug_threads)
3809 debug_printf ("wait_for_sigstop: pulling events\n");
3810
3811 /* Passing NULL_PTID as filter indicates we want all events to be
3812 left pending. Eventually this returns when there are no
3813 unwaited-for children left. */
3814 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3815 &wstat, __WALL);
3816 gdb_assert (ret == -1);
3817
3818 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3819 current_thread = saved_thread;
3820 else
3821 {
3822 if (debug_threads)
3823 debug_printf ("Previously current thread died.\n");
3824
      /* We can't change the current inferior behind GDB's back;
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
3828 current_thread = NULL;
3829 }
3830}
3831
3832/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3833 move it out, because we need to report the stop event to GDB. For
3834 example, if the user puts a breakpoint in the jump pad, it's
3835 because she wants to debug it. */
3836
3837static int
3838stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3839{
3840 struct thread_info *thread = (struct thread_info *) entry;
3841 struct lwp_info *lwp = get_thread_lwp (thread);
3842
3843 if (lwp->suspended != 0)
3844 {
3845 internal_error (__FILE__, __LINE__,
3846 "LWP %ld is suspended, suspended=%d\n",
3847 lwpid_of (thread), lwp->suspended);
3848 }
3849 gdb_assert (lwp->stopped);
3850
  /* Allow debugging the jump pad, gdb_collect, etc.  */
3852 return (supports_fast_tracepoints ()
3853 && agent_loaded_p ()
3854 && (gdb_breakpoint_here (lwp->stop_pc)
3855 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3856 || thread->last_resume_kind == resume_step)
3857 && linux_fast_tracepoint_collecting (lwp, NULL));
3858}
3859
3860static void
3861move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3862{
3863 struct thread_info *thread = (struct thread_info *) entry;
3864 struct thread_info *saved_thread;
3865 struct lwp_info *lwp = get_thread_lwp (thread);
3866 int *wstat;
3867
3868 if (lwp->suspended != 0)
3869 {
3870 internal_error (__FILE__, __LINE__,
3871 "LWP %ld is suspended, suspended=%d\n",
3872 lwpid_of (thread), lwp->suspended);
3873 }
3874 gdb_assert (lwp->stopped);
3875
3876 /* For gdb_breakpoint_here. */
3877 saved_thread = current_thread;
3878 current_thread = thread;
3879
3880 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3881
3882 /* Allow debugging the jump pad, gdb_collect, etc. */
3883 if (!gdb_breakpoint_here (lwp->stop_pc)
3884 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3885 && thread->last_resume_kind != resume_step
3886 && maybe_move_out_of_jump_pad (lwp, wstat))
3887 {
3888 if (debug_threads)
3889 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3890 lwpid_of (thread));
3891
3892 if (wstat)
3893 {
3894 lwp->status_pending_p = 0;
3895 enqueue_one_deferred_signal (lwp, wstat);
3896
3897 if (debug_threads)
3898 debug_printf ("Signal %d for LWP %ld deferred "
3899 "(in jump pad)\n",
3900 WSTOPSIG (*wstat), lwpid_of (thread));
3901 }
3902
3903 linux_resume_one_lwp (lwp, 0, 0, NULL);
3904 }
3905 else
3906 lwp_suspended_inc (lwp);
3907
3908 current_thread = saved_thread;
3909}
3910
3911static int
3912lwp_running (struct inferior_list_entry *entry, void *data)
3913{
3914 struct thread_info *thread = (struct thread_info *) entry;
3915 struct lwp_info *lwp = get_thread_lwp (thread);
3916
3917 if (lwp_is_marked_dead (lwp))
3918 return 0;
3919 if (lwp->stopped)
3920 return 0;
3921 return 1;
3922}
3923
3924/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3925 If SUSPEND, then also increase the suspend count of every LWP,
3926 except EXCEPT. */
3927
3928static void
3929stop_all_lwps (int suspend, struct lwp_info *except)
3930{
3931 /* Should not be called recursively. */
3932 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3933
3934 if (debug_threads)
3935 {
3936 debug_enter ();
3937 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3938 suspend ? "stop-and-suspend" : "stop",
3939 except != NULL
3940 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3941 : "none");
3942 }
3943
3944 stopping_threads = (suspend
3945 ? STOPPING_AND_SUSPENDING_THREADS
3946 : STOPPING_THREADS);
3947
3948 if (suspend)
3949 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3950 else
3951 find_inferior (&all_threads, send_sigstop_callback, except);
3952 wait_for_sigstop ();
3953 stopping_threads = NOT_STOPPING_THREADS;
3954
3955 if (debug_threads)
3956 {
3957 debug_printf ("stop_all_lwps done, setting stopping_threads "
3958 "back to !stopping\n");
3959 debug_exit ();
3960 }
3961}
3962
3963/* Enqueue one signal in the chain of signals which need to be
3964 delivered to this process on next resume. */
3965
3966static void
3967enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3968{
3969 struct pending_signals *p_sig = XNEW (struct pending_signals);
3970
3971 p_sig->prev = lwp->pending_signals;
3972 p_sig->signal = signal;
3973 if (info == NULL)
3974 memset (&p_sig->info, 0, sizeof (siginfo_t));
3975 else
3976 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3977 lwp->pending_signals = p_sig;
3978}
3979
3980/* Resume execution of LWP. If STEP is nonzero, single-step it. If
3981 SIGNAL is nonzero, give it that signal. */
3982
3983static void
3984linux_resume_one_lwp_throw (struct lwp_info *lwp,
3985 int step, int signal, siginfo_t *info)
3986{
3987 struct thread_info *thread = get_lwp_thread (lwp);
3988 struct thread_info *saved_thread;
3989 int fast_tp_collecting;
3990 struct process_info *proc = get_thread_process (thread);
3991
  /* Note that the target description may not be initialised
     (proc->tdesc == NULL) at this point, because the program hasn't
     stopped at its first instruction yet.  That happens while
     GDBserver is skipping the extra traps from the wrapper program
     (see option --wrapper).  Code in this function that requires
     register access should be guarded by a check on proc->tdesc.  */
3998
3999 if (lwp->stopped == 0)
4000 return;
4001
4002 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4003
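  /* COLLECTING_FAST_TRACEPOINT is 0 when no collect is in progress,
     1 when the LWP must leave the jump pad through the exit-jump-pad
     breakpoint, and 2 when it must single-step out of the jump pad
     (see the handling of both cases further below).  */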
4004 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4005
4006 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4007
4008 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4009 user used the "jump" command, or "set $pc = foo"). */
4010 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4011 {
4012 /* Collecting 'while-stepping' actions doesn't make sense
4013 anymore. */
4014 release_while_stepping_state_list (thread);
4015 }
4016
4017 /* If we have pending signals or status, and a new signal, enqueue the
4018 signal. Also enqueue the signal if we are waiting to reinsert a
4019 breakpoint; it will be picked up again below. */
4020 if (signal != 0
4021 && (lwp->status_pending_p
4022 || lwp->pending_signals != NULL
4023 || lwp->bp_reinsert != 0
4024 || fast_tp_collecting))
    enqueue_pending_signal (lwp, signal, info);
4036
4037 if (lwp->status_pending_p)
4038 {
4039 if (debug_threads)
4040 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
4041 " has pending status\n",
4042 lwpid_of (thread), step ? "step" : "continue", signal,
4043 lwp->stop_expected ? "expected" : "not expected");
4044 return;
4045 }
4046
4047 saved_thread = current_thread;
4048 current_thread = thread;
4049
4050 if (debug_threads)
4051 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4052 lwpid_of (thread), step ? "step" : "continue", signal,
4053 lwp->stop_expected ? "expected" : "not expected");
4054
4055 /* This bit needs some thinking about. If we get a signal that
4056 we must report while a single-step reinsert is still pending,
4057 we often end up resuming the thread. It might be better to
4058 (ew) allow a stack of pending events; then we could be sure that
4059 the reinsert happened right away and not lose any signals.
4060
4061 Making this stack would also shrink the window in which breakpoints are
4062 uninserted (see comment in linux_wait_for_lwp) but not enough for
4063 complete correctness, so it won't solve that problem. It may be
4064 worthwhile just to solve this one, however. */
4065 if (lwp->bp_reinsert != 0)
4066 {
4067 if (debug_threads)
4068 debug_printf (" pending reinsert at 0x%s\n",
4069 paddress (lwp->bp_reinsert));
4070
4071 if (can_hardware_single_step ())
4072 {
4073 if (fast_tp_collecting == 0)
4074 {
4075 if (step == 0)
4076 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4077 if (lwp->suspended)
4078 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4079 lwp->suspended);
4080 }
4081
4082 step = 1;
4083 }
4084
4085 /* Postpone any pending signal. It was enqueued above. */
4086 signal = 0;
4087 }
4088
4089 if (fast_tp_collecting == 1)
4090 {
4091 if (debug_threads)
4092 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4093 " (exit-jump-pad-bkpt)\n",
4094 lwpid_of (thread));
4095
4096 /* Postpone any pending signal. It was enqueued above. */
4097 signal = 0;
4098 }
4099 else if (fast_tp_collecting == 2)
4100 {
4101 if (debug_threads)
4102 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4103 " single-stepping\n",
4104 lwpid_of (thread));
4105
4106 if (can_hardware_single_step ())
4107 step = 1;
4108 else
4109 {
4110 internal_error (__FILE__, __LINE__,
4111 "moving out of jump pad single-stepping"
4112 " not implemented on this target");
4113 }
4114
4115 /* Postpone any pending signal. It was enqueued above. */
4116 signal = 0;
4117 }
4118
  /* If we have while-stepping actions in this thread, set it stepping.
4120 If we have a signal to deliver, it may or may not be set to
4121 SIG_IGN, we don't know. Assume so, and allow collecting
4122 while-stepping into a signal handler. A possible smart thing to
4123 do would be to set an internal breakpoint at the signal return
4124 address, continue, and carry on catching this while-stepping
4125 action only when that breakpoint is hit. A future
4126 enhancement. */
4127 if (thread->while_stepping != NULL
4128 && can_hardware_single_step ())
4129 {
4130 if (debug_threads)
4131 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4132 lwpid_of (thread));
4133 step = 1;
4134 }
4135
4136 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4137 {
4138 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4139
4140 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4141
4142 if (debug_threads)
4143 {
4144 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4145 (long) lwp->stop_pc);
4146 }
4147 }
4148
4149 /* If we have pending signals, consume one unless we are trying to
4150 reinsert a breakpoint or we're trying to finish a fast tracepoint
4151 collect. */
4152 if (lwp->pending_signals != NULL
4153 && lwp->bp_reinsert == 0
4154 && fast_tp_collecting == 0)
4155 {
4156 struct pending_signals **p_sig;
4157
4158 p_sig = &lwp->pending_signals;
4159 while ((*p_sig)->prev != NULL)
4160 p_sig = &(*p_sig)->prev;
4161
4162 signal = (*p_sig)->signal;
4163 if ((*p_sig)->info.si_signo != 0)
4164 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4165 &(*p_sig)->info);
4166
4167 free (*p_sig);
4168 *p_sig = NULL;
4169 }
4170
4171 if (the_low_target.prepare_to_resume != NULL)
4172 the_low_target.prepare_to_resume (lwp);
4173
4174 regcache_invalidate_thread (thread);
4175 errno = 0;
4176 lwp->stepping = step;
4177 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
4178 (PTRACE_TYPE_ARG3) 0,
4179 /* Coerce to a uintptr_t first to avoid potential gcc warning
4180 of coercing an 8 byte integer to a 4 byte pointer. */
4181 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4182
4183 current_thread = saved_thread;
4184 if (errno)
4185 perror_with_name ("resuming thread");
4186
4187 /* Successfully resumed. Clear state that no longer makes sense,
4188 and mark the LWP as running. Must not do this before resuming
4189 otherwise if that fails other code will be confused. E.g., we'd
4190 later try to stop the LWP and hang forever waiting for a stop
4191 status. Note that we must not throw after this is cleared,
4192 otherwise handle_zombie_lwp_error would get confused. */
4193 lwp->stopped = 0;
4194 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4195}
4196
/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's a
   zombie, or about to become one), discard the error, clear any
   pending status the LWP may have, and return true (we'll collect
   the exit status soon enough).  Otherwise, return false.  */
4202
4203static int
4204check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4205{
4206 struct thread_info *thread = get_lwp_thread (lp);
4207
4208 /* If we get an error after resuming the LWP successfully, we'd
4209 confuse !T state for the LWP being gone. */
4210 gdb_assert (lp->stopped);
4211
4212 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4213 because even if ptrace failed with ESRCH, the tracee may be "not
4214 yet fully dead", but already refusing ptrace requests. In that
4215 case the tracee has 'R (Running)' state for a little bit
4216 (observed in Linux 3.18). See also the note on ESRCH in the
4217 ptrace(2) man page. Instead, check whether the LWP has any state
4218 other than ptrace-stopped. */
4219
4220 /* Don't assume anything if /proc/PID/status can't be read. */
4221 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4222 {
4223 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4224 lp->status_pending_p = 0;
4225 return 1;
4226 }
4227 return 0;
4228}
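
/* For illustration only (a minimal sketch, not code used by this
   file): a check in the spirit of
   linux_proc_pid_is_trace_stopped_nowarn boils down to parsing the
   "State:" line of /proc/PID/status.  The substring tested below
   matches both the "t (tracing stop)" and the older
   "T (tracing stop)" spellings:

     char path[64], line[128];
     FILE *f;
     int trace_stopped = 0;

     snprintf (path, sizeof path, "/proc/%d/status", (int) pid);
     f = fopen (path, "r");
     if (f != NULL)
       {
	 while (fgets (line, sizeof line, f) != NULL)
	   if (strncmp (line, "State:", 6) == 0)
	     {
	       trace_stopped = strstr (line, "(tracing stop)") != NULL;
	       break;
	     }
	 fclose (f);
       }

   The real implementation lives in nat/linux-procfs.c.  */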
4229
4230/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4231 disappears while we try to resume it. */
4232
4233static void
4234linux_resume_one_lwp (struct lwp_info *lwp,
4235 int step, int signal, siginfo_t *info)
4236{
4237 TRY
4238 {
4239 linux_resume_one_lwp_throw (lwp, step, signal, info);
4240 }
4241 CATCH (ex, RETURN_MASK_ERROR)
4242 {
4243 if (!check_ptrace_stopped_lwp_gone (lwp))
4244 throw_exception (ex);
4245 }
4246 END_CATCH
4247}
4248
4249struct thread_resume_array
4250{
4251 struct thread_resume *resume;
4252 size_t n;
4253};
4254
4255/* This function is called once per thread via find_inferior.
4256 ARG is a pointer to a thread_resume_array struct.
4257 We look up the thread specified by ENTRY in ARG, and mark the thread
4258 with a pointer to the appropriate resume request.
4259
   This algorithm is O(threads * resume elements), but the number of
   resume elements is small (and will remain small at least until GDB
   supports thread suspension).  */
4263
4264static int
4265linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4266{
4267 struct thread_info *thread = (struct thread_info *) entry;
4268 struct lwp_info *lwp = get_thread_lwp (thread);
4269 int ndx;
4270 struct thread_resume_array *r;
4271
4272 r = (struct thread_resume_array *) arg;
4273
4274 for (ndx = 0; ndx < r->n; ndx++)
4275 {
4276 ptid_t ptid = r->resume[ndx].thread;
4277 if (ptid_equal (ptid, minus_one_ptid)
4278 || ptid_equal (ptid, entry->id)
4279 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4280 of PID'. */
4281 || (ptid_get_pid (ptid) == pid_of (thread)
4282 && (ptid_is_pid (ptid)
4283 || ptid_get_lwp (ptid) == -1)))
4284 {
4285 if (r->resume[ndx].kind == resume_stop
4286 && thread->last_resume_kind == resume_stop)
4287 {
4288 if (debug_threads)
4289 debug_printf ("already %s LWP %ld at GDB's request\n",
4290 (thread->last_status.kind
4291 == TARGET_WAITKIND_STOPPED)
4292 ? "stopped"
4293 : "stopping",
4294 lwpid_of (thread));
4295
4296 continue;
4297 }
4298
4299 lwp->resume = &r->resume[ndx];
4300 thread->last_resume_kind = lwp->resume->kind;
4301
4302 lwp->step_range_start = lwp->resume->step_range_start;
4303 lwp->step_range_end = lwp->resume->step_range_end;
4304
4305 /* If we had a deferred signal to report, dequeue one now.
4306 This can happen if LWP gets more than one signal while
4307 trying to get out of a jump pad. */
4308 if (lwp->stopped
4309 && !lwp->status_pending_p
4310 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4311 {
4312 lwp->status_pending_p = 1;
4313
4314 if (debug_threads)
4315 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4316 "leaving status pending.\n",
4317 WSTOPSIG (lwp->status_pending),
4318 lwpid_of (thread));
4319 }
4320
4321 return 0;
4322 }
4323 }
4324
4325 /* No resume action for this thread. */
4326 lwp->resume = NULL;
4327
4328 return 0;
4329}
4330
4331/* find_inferior callback for linux_resume.
4332 Set *FLAG_P if this lwp has an interesting status pending. */
4333
4334static int
4335resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4336{
4337 struct thread_info *thread = (struct thread_info *) entry;
4338 struct lwp_info *lwp = get_thread_lwp (thread);
4339
4340 /* LWPs which will not be resumed are not interesting, because
4341 we might not wait for them next time through linux_wait. */
4342 if (lwp->resume == NULL)
4343 return 0;
4344
4345 if (thread_still_has_status_pending_p (thread))
4346 * (int *) flag_p = 1;
4347
4348 return 0;
4349}
4350
4351/* Return 1 if this lwp that GDB wants running is stopped at an
4352 internal breakpoint that we need to step over. It assumes that any
4353 required STOP_PC adjustment has already been propagated to the
4354 inferior's regcache. */
4355
4356static int
4357need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4358{
4359 struct thread_info *thread = (struct thread_info *) entry;
4360 struct lwp_info *lwp = get_thread_lwp (thread);
4361 struct thread_info *saved_thread;
4362 CORE_ADDR pc;
4363 struct process_info *proc = get_thread_process (thread);
4364
4365 /* GDBserver is skipping the extra traps from the wrapper program,
4366 don't have to do step over. */
4367 if (proc->tdesc == NULL)
4368 return 0;
4369
4370 /* LWPs which will not be resumed are not interesting, because we
4371 might not wait for them next time through linux_wait. */
4372
4373 if (!lwp->stopped)
4374 {
4375 if (debug_threads)
4376 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4377 lwpid_of (thread));
4378 return 0;
4379 }
4380
4381 if (thread->last_resume_kind == resume_stop)
4382 {
4383 if (debug_threads)
4384 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4385 " stopped\n",
4386 lwpid_of (thread));
4387 return 0;
4388 }
4389
4390 gdb_assert (lwp->suspended >= 0);
4391
4392 if (lwp->suspended)
4393 {
4394 if (debug_threads)
4395 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4396 lwpid_of (thread));
4397 return 0;
4398 }
4399
4400 if (!lwp->need_step_over)
4401 {
4402 if (debug_threads)
4403 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4404 }
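  /* Note that control falls through here even when NEED_STEP_OVER is
     clear; the breakpoint checks below still run and may request a
     step-over.  */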
4405
4406 if (lwp->status_pending_p)
4407 {
4408 if (debug_threads)
4409 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4410 " status.\n",
4411 lwpid_of (thread));
4412 return 0;
4413 }
4414
4415 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4416 or we have. */
4417 pc = get_pc (lwp);
4418
4419 /* If the PC has changed since we stopped, then don't do anything,
4420 and let the breakpoint/tracepoint be hit. This happens if, for
4421 instance, GDB handled the decr_pc_after_break subtraction itself,
4422 GDB is OOL stepping this thread, or the user has issued a "jump"
4423 command, or poked thread's registers herself. */
4424 if (pc != lwp->stop_pc)
4425 {
4426 if (debug_threads)
4427 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4428 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4429 lwpid_of (thread),
4430 paddress (lwp->stop_pc), paddress (pc));
4431
4432 lwp->need_step_over = 0;
4433 return 0;
4434 }
4435
4436 saved_thread = current_thread;
4437 current_thread = thread;
4438
4439 /* We can only step over breakpoints we know about. */
4440 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4441 {
      /* Don't step over a breakpoint that GDB expects to hit though.
	 If the condition is being evaluated on the target's side and
	 it evaluates to false, step over this breakpoint as well.  */
4445 if (gdb_breakpoint_here (pc)
4446 && gdb_condition_true_at_breakpoint (pc)
4447 && gdb_no_commands_at_breakpoint (pc))
4448 {
4449 if (debug_threads)
4450 debug_printf ("Need step over [LWP %ld]? yes, but found"
4451 " GDB breakpoint at 0x%s; skipping step over\n",
4452 lwpid_of (thread), paddress (pc));
4453
4454 current_thread = saved_thread;
4455 return 0;
4456 }
4457 else
4458 {
4459 if (debug_threads)
4460 debug_printf ("Need step over [LWP %ld]? yes, "
4461 "found breakpoint at 0x%s\n",
4462 lwpid_of (thread), paddress (pc));
4463
4464 /* We've found an lwp that needs stepping over --- return 1 so
4465 that find_inferior stops looking. */
4466 current_thread = saved_thread;
4467
4468 /* If the step over is cancelled, this is set again. */
4469 lwp->need_step_over = 0;
4470 return 1;
4471 }
4472 }
4473
4474 current_thread = saved_thread;
4475
4476 if (debug_threads)
4477 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4478 " at 0x%s\n",
4479 lwpid_of (thread), paddress (pc));
4480
4481 return 0;
4482}
4483
/* Start a step-over operation on LWP.  When LWP is stopped at a
   breakpoint, to make progress we need to get the breakpoint out of
   the way.  If we let other threads run while we do that, they may
   pass by the breakpoint location and miss hitting it.  To avoid
4488 that, a step-over momentarily stops all threads while LWP is
4489 single-stepped while the breakpoint is temporarily uninserted from
4490 the inferior. When the single-step finishes, we reinsert the
4491 breakpoint, and let all threads that are supposed to be running,
4492 run again.
4493
4494 On targets that don't support hardware single-step, we don't
4495 currently support full software single-stepping. Instead, we only
4496 support stepping over the thread event breakpoint, by asking the
4497 low target where to place a reinsert breakpoint. Since this
4498 routine assumes the breakpoint being stepped over is a thread event
   breakpoint, the return address of the current function is usually
   taken as a good enough place to set the reinsert breakpoint.  */
4501
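/* In outline, the sequence is: stop_all_lwps suspends everything;
   the breakpoint at PC is uninserted; LWP is resumed for a single
   step (or until it hits a reinsert breakpoint, on targets without
   hardware single-step); once LWP reports its stop, finish_step_over
   reinserts the breakpoint and the other LWPs are set running again
   (see proceed_all_lwps / unstop_all_lwps).  */
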
4502static int
4503start_step_over (struct lwp_info *lwp)
4504{
4505 struct thread_info *thread = get_lwp_thread (lwp);
4506 struct thread_info *saved_thread;
4507 CORE_ADDR pc;
4508 int step;
4509
4510 if (debug_threads)
4511 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4512 lwpid_of (thread));
4513
4514 stop_all_lwps (1, lwp);
4515
4516 if (lwp->suspended != 0)
4517 {
4518 internal_error (__FILE__, __LINE__,
4519 "LWP %ld suspended=%d\n", lwpid_of (thread),
4520 lwp->suspended);
4521 }
4522
4523 if (debug_threads)
4524 debug_printf ("Done stopping all threads for step-over.\n");
4525
4526 /* Note, we should always reach here with an already adjusted PC,
4527 either by GDB (if we're resuming due to GDB's request), or by our
4528 caller, if we just finished handling an internal breakpoint GDB
4529 shouldn't care about. */
4530 pc = get_pc (lwp);
4531
4532 saved_thread = current_thread;
4533 current_thread = thread;
4534
4535 lwp->bp_reinsert = pc;
4536 uninsert_breakpoints_at (pc);
4537 uninsert_fast_tracepoint_jumps_at (pc);
4538
4539 if (can_hardware_single_step ())
4540 {
4541 step = 1;
4542 }
4543 else if (can_software_single_step ())
4544 {
4545 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4546 set_reinsert_breakpoint (raddr);
4547 step = 0;
4548 }
4549 else
4550 {
4551 internal_error (__FILE__, __LINE__,
4552 "stepping is not implemented on this target");
4553 }
4554
4555 current_thread = saved_thread;
4556
4557 linux_resume_one_lwp (lwp, step, 0, NULL);
4558
4559 /* Require next event from this LWP. */
4560 step_over_bkpt = thread->entry.id;
4561 return 1;
4562}
4563
4564/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4565 start_step_over, if still there, and delete any reinsert
4566 breakpoints we've set, on non hardware single-step targets. */
4567
4568static int
4569finish_step_over (struct lwp_info *lwp)
4570{
4571 if (lwp->bp_reinsert != 0)
4572 {
4573 if (debug_threads)
4574 debug_printf ("Finished step over.\n");
4575
4576 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4577 may be no breakpoint to reinsert there by now. */
4578 reinsert_breakpoints_at (lwp->bp_reinsert);
4579 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4580
4581 lwp->bp_reinsert = 0;
4582
4583 /* Delete any software-single-step reinsert breakpoints. No
4584 longer needed. We don't have to worry about other threads
4585 hitting this trap, and later not being able to explain it,
4586 because we were stepping over a breakpoint, and we hold all
4587 threads but LWP stopped while doing that. */
4588 if (!can_hardware_single_step ())
4589 delete_reinsert_breakpoints ();
4590
4591 step_over_bkpt = null_ptid;
4592 return 1;
4593 }
4594 else
4595 return 0;
4596}
4597
4598/* If there's a step over in progress, wait until all threads stop
4599 (that is, until the stepping thread finishes its step), and
4600 unsuspend all lwps. The stepping thread ends with its status
4601 pending, which is processed later when we get back to processing
4602 events. */
4603
4604static void
4605complete_ongoing_step_over (void)
4606{
4607 if (!ptid_equal (step_over_bkpt, null_ptid))
4608 {
4609 struct lwp_info *lwp;
4610 int wstat;
4611 int ret;
4612
4613 if (debug_threads)
4614 debug_printf ("detach: step over in progress, finish it first\n");
4615
4616 /* Passing NULL_PTID as filter indicates we want all events to
4617 be left pending. Eventually this returns when there are no
4618 unwaited-for children left. */
4619 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4620 &wstat, __WALL);
4621 gdb_assert (ret == -1);
4622
4623 lwp = find_lwp_pid (step_over_bkpt);
4624 if (lwp != NULL)
4625 finish_step_over (lwp);
4626 step_over_bkpt = null_ptid;
4627 unsuspend_all_lwps (lwp);
4628 }
4629}
4630
4631/* This function is called once per thread. We check the thread's resume
4632 request, which will tell us whether to resume, step, or leave the thread
4633 stopped; and what signal, if any, it should be sent.
4634
4635 For threads which we aren't explicitly told otherwise, we preserve
4636 the stepping flag; this is used for stepping over gdbserver-placed
4637 breakpoints.
4638
4639 If pending_flags was set in any thread, we queue any needed
4640 signals, since we won't actually resume. We already have a pending
4641 event to report, so we don't need to preserve any step requests;
4642 they should be re-issued if necessary. */
4643
4644static int
4645linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4646{
4647 struct thread_info *thread = (struct thread_info *) entry;
4648 struct lwp_info *lwp = get_thread_lwp (thread);
4649 int step;
4650 int leave_all_stopped = * (int *) arg;
4651 int leave_pending;
4652
4653 if (lwp->resume == NULL)
4654 return 0;
4655
4656 if (lwp->resume->kind == resume_stop)
4657 {
4658 if (debug_threads)
4659 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4660
4661 if (!lwp->stopped)
4662 {
4663 if (debug_threads)
4664 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4665
4666 /* Stop the thread, and wait for the event asynchronously,
4667 through the event loop. */
4668 send_sigstop (lwp);
4669 }
4670 else
4671 {
4672 if (debug_threads)
4673 debug_printf ("already stopped LWP %ld\n",
4674 lwpid_of (thread));
4675
4676 /* The LWP may have been stopped in an internal event that
4677 was not meant to be notified back to GDB (e.g., gdbserver
4678 breakpoint), so we should be reporting a stop event in
4679 this case too. */
4680
4681 /* If the thread already has a pending SIGSTOP, this is a
4682 no-op. Otherwise, something later will presumably resume
4683 the thread and this will cause it to cancel any pending
4684 operation, due to last_resume_kind == resume_stop. If
4685 the thread already has a pending status to report, we
4686 will still report it the next time we wait - see
4687 status_pending_p_callback. */
4688
4689 /* If we already have a pending signal to report, then
4690 there's no need to queue a SIGSTOP, as this means we're
4691 midway through moving the LWP out of the jumppad, and we
4692 will report the pending signal as soon as that is
4693 finished. */
4694 if (lwp->pending_signals_to_report == NULL)
4695 send_sigstop (lwp);
4696 }
4697
4698 /* For stop requests, we're done. */
4699 lwp->resume = NULL;
4700 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4701 return 0;
4702 }
4703
4704 /* If this thread which is about to be resumed has a pending status,
4705 then don't resume it - we can just report the pending status.
4706 Likewise if it is suspended, because e.g., another thread is
4707 stepping past a breakpoint. Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we base this decision
     on whether *any* thread has a pending status.  If there's a
4710 thread that needs the step-over-breakpoint dance, then don't
4711 resume any other thread but that particular one. */
4712 leave_pending = (lwp->suspended
4713 || lwp->status_pending_p
4714 || leave_all_stopped);
4715
4716 if (!leave_pending)
4717 {
4718 if (debug_threads)
4719 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4720
4721 step = (lwp->resume->kind == resume_step);
4722 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4723 }
4724 else
4725 {
4726 if (debug_threads)
4727 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4728
4729 /* If we have a new signal, enqueue the signal. */
4730 if (lwp->resume->sig != 0)
4731 {
4732 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4733
4734 p_sig->prev = lwp->pending_signals;
4735 p_sig->signal = lwp->resume->sig;
4736
4737 /* If this is the same signal we were previously stopped by,
4738 make sure to queue its siginfo. We can ignore the return
4739 value of ptrace; if it fails, we'll skip
4740 PTRACE_SETSIGINFO. */
4741 if (WIFSTOPPED (lwp->last_status)
4742 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4743 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4744 &p_sig->info);
4745
4746 lwp->pending_signals = p_sig;
4747 }
4748 }
4749
4750 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4751 lwp->resume = NULL;
4752 return 0;
4753}
4754
4755static void
4756linux_resume (struct thread_resume *resume_info, size_t n)
4757{
4758 struct thread_resume_array array = { resume_info, n };
4759 struct thread_info *need_step_over = NULL;
4760 int any_pending;
4761 int leave_all_stopped;
4762
4763 if (debug_threads)
4764 {
4765 debug_enter ();
4766 debug_printf ("linux_resume:\n");
4767 }
4768
4769 find_inferior (&all_threads, linux_set_resume_request, &array);
4770
4771 /* If there is a thread which would otherwise be resumed, which has
4772 a pending status, then don't resume any threads - we can just
4773 report the pending status. Make sure to queue any signals that
4774 would otherwise be sent. In non-stop mode, we'll apply this
4775 logic to each thread individually. We consume all pending events
4776 before considering to start a step-over (in all-stop). */
4777 any_pending = 0;
4778 if (!non_stop)
4779 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4780
4781 /* If there is a thread which would otherwise be resumed, which is
4782 stopped at a breakpoint that needs stepping over, then don't
4783 resume any threads - have it step over the breakpoint with all
4784 other threads stopped, then resume all threads again. Make sure
4785 to queue any signals that would otherwise be delivered or
4786 queued. */
4787 if (!any_pending && supports_breakpoints ())
4788 need_step_over
4789 = (struct thread_info *) find_inferior (&all_threads,
4790 need_step_over_p, NULL);
4791
4792 leave_all_stopped = (need_step_over != NULL || any_pending);
4793
4794 if (debug_threads)
4795 {
4796 if (need_step_over != NULL)
4797 debug_printf ("Not resuming all, need step over\n");
4798 else if (any_pending)
4799 debug_printf ("Not resuming, all-stop and found "
4800 "an LWP with pending status\n");
4801 else
4802 debug_printf ("Resuming, no pending status or step over needed\n");
4803 }
4804
4805 /* Even if we're leaving threads stopped, queue all signals we'd
4806 otherwise deliver. */
4807 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4808
4809 if (need_step_over)
4810 start_step_over (get_thread_lwp (need_step_over));
4811
4812 if (debug_threads)
4813 {
4814 debug_printf ("linux_resume done\n");
4815 debug_exit ();
4816 }
4817
4818 /* We may have events that were pending that can/should be sent to
4819 the client now. Trigger a linux_wait call. */
4820 if (target_is_async_p ())
4821 async_file_mark ();
4822}
4823
4824/* This function is called once per thread. We check the thread's
4825 last resume request, which will tell us whether to resume, step, or
4826 leave the thread stopped. Any signal the client requested to be
4827 delivered has already been enqueued at this point.
4828
4829 If any thread that GDB wants running is stopped at an internal
4830 breakpoint that needs stepping over, we start a step-over operation
4831 on that particular thread, and leave all others stopped. */
4832
4833static int
4834proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4835{
4836 struct thread_info *thread = (struct thread_info *) entry;
4837 struct lwp_info *lwp = get_thread_lwp (thread);
4838 int step;
4839
4840 if (lwp == except)
4841 return 0;
4842
4843 if (debug_threads)
4844 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4845
4846 if (!lwp->stopped)
4847 {
4848 if (debug_threads)
4849 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4850 return 0;
4851 }
4852
4853 if (thread->last_resume_kind == resume_stop
4854 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4855 {
4856 if (debug_threads)
	debug_printf ("   client wants LWP %ld to remain stopped\n",
		      lwpid_of (thread));
4859 return 0;
4860 }
4861
4862 if (lwp->status_pending_p)
4863 {
4864 if (debug_threads)
4865 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4866 lwpid_of (thread));
4867 return 0;
4868 }
4869
4870 gdb_assert (lwp->suspended >= 0);
4871
4872 if (lwp->suspended)
4873 {
4874 if (debug_threads)
4875 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4876 return 0;
4877 }
4878
4879 if (thread->last_resume_kind == resume_stop
4880 && lwp->pending_signals_to_report == NULL
4881 && lwp->collecting_fast_tracepoint == 0)
4882 {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here).  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling, for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
4888 that was queued at vCont;t handling time will have already
4889 been consumed by wait_for_sigstop, and so we need to requeue
4890 another one here. Note that if the LWP already has a SIGSTOP
4891 pending, this is a no-op. */
4892
4893 if (debug_threads)
4894 debug_printf ("Client wants LWP %ld to stop. "
4895 "Making sure it has a SIGSTOP pending\n",
4896 lwpid_of (thread));
4897
4898 send_sigstop (lwp);
4899 }
4900
4901 if (thread->last_resume_kind == resume_step)
4902 {
4903 if (debug_threads)
4904 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4905 lwpid_of (thread));
4906 step = 1;
4907 }
4908 else if (lwp->bp_reinsert != 0)
4909 {
4910 if (debug_threads)
4911 debug_printf (" stepping LWP %ld, reinsert set\n",
4912 lwpid_of (thread));
4913 step = 1;
4914 }
4915 else
4916 step = 0;
4917
4918 linux_resume_one_lwp (lwp, step, 0, NULL);
4919 return 0;
4920}
4921
4922static int
4923unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4924{
4925 struct thread_info *thread = (struct thread_info *) entry;
4926 struct lwp_info *lwp = get_thread_lwp (thread);
4927
4928 if (lwp == except)
4929 return 0;
4930
4931 lwp_suspended_decr (lwp);
4932
4933 return proceed_one_lwp (entry, except);
4934}
4935
4936/* When we finish a step-over, set threads running again. If there's
4937 another thread that may need a step-over, now's the time to start
4938 it. Eventually, we'll move all threads past their breakpoints. */
4939
4940static void
4941proceed_all_lwps (void)
4942{
4943 struct thread_info *need_step_over;
4944
4945 /* If there is a thread which would otherwise be resumed, which is
4946 stopped at a breakpoint that needs stepping over, then don't
4947 resume any threads - have it step over the breakpoint with all
4948 other threads stopped, then resume all threads again. */
4949
4950 if (supports_breakpoints ())
4951 {
4952 need_step_over
4953 = (struct thread_info *) find_inferior (&all_threads,
4954 need_step_over_p, NULL);
4955
4956 if (need_step_over != NULL)
4957 {
4958 if (debug_threads)
4959 debug_printf ("proceed_all_lwps: found "
4960 "thread %ld needing a step-over\n",
4961 lwpid_of (need_step_over));
4962
4963 start_step_over (get_thread_lwp (need_step_over));
4964 return;
4965 }
4966 }
4967
4968 if (debug_threads)
4969 debug_printf ("Proceeding, no step-over needed\n");
4970
4971 find_inferior (&all_threads, proceed_one_lwp, NULL);
4972}
4973
4974/* Stopped LWPs that the client wanted to be running, that don't have
4975 pending statuses, are set to run again, except for EXCEPT, if not
4976 NULL. This undoes a stop_all_lwps call. */
4977
4978static void
4979unstop_all_lwps (int unsuspend, struct lwp_info *except)
4980{
4981 if (debug_threads)
4982 {
4983 debug_enter ();
4984 if (except)
4985 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4986 lwpid_of (get_lwp_thread (except)));
4987 else
4988 debug_printf ("unstopping all lwps\n");
4989 }
4990
4991 if (unsuspend)
4992 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4993 else
4994 find_inferior (&all_threads, proceed_one_lwp, except);
4995
4996 if (debug_threads)
4997 {
4998 debug_printf ("unstop_all_lwps done\n");
4999 debug_exit ();
5000 }
5001}
5002
5003
5004#ifdef HAVE_LINUX_REGSETS
5005
5006#define use_linux_regsets 1
5007
5008/* Returns true if REGSET has been disabled. */
5009
5010static int
5011regset_disabled (struct regsets_info *info, struct regset_info *regset)
5012{
5013 return (info->disabled_regsets != NULL
5014 && info->disabled_regsets[regset - info->regsets]);
5015}
5016
5017/* Disable REGSET. */
5018
5019static void
5020disable_regset (struct regsets_info *info, struct regset_info *regset)
5021{
5022 int dr_offset;
5023
5024 dr_offset = regset - info->regsets;
5025 if (info->disabled_regsets == NULL)
5026 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5027 info->disabled_regsets[dr_offset] = 1;
5028}
5029
5030static int
5031regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5032 struct regcache *regcache)
5033{
5034 struct regset_info *regset;
5035 int saw_general_regs = 0;
5036 int pid;
5037 struct iovec iov;
5038
5039 pid = lwpid_of (current_thread);
5040 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5041 {
5042 void *buf, *data;
5043 int nt_type, res;
5044
5045 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5046 continue;
5047
5048 buf = xmalloc (regset->size);
5049
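	/* Regsets that carry an NT_* note type are transferred with
	   PTRACE_GETREGSET, which takes a struct iovec describing the
	   buffer; regsets without one pass the buffer directly.  */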
5050 nt_type = regset->nt_type;
5051 if (nt_type)
5052 {
5053 iov.iov_base = buf;
5054 iov.iov_len = regset->size;
5055 data = (void *) &iov;
5056 }
5057 else
5058 data = buf;
5059
5060#ifndef __sparc__
5061 res = ptrace (regset->get_request, pid,
5062 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5063#else
5064 res = ptrace (regset->get_request, pid, data, nt_type);
5065#endif
5066 if (res < 0)
5067 {
5068 if (errno == EIO)
5069 {
5070 /* If we get EIO on a regset, do not try it again for
5071 this process mode. */
5072 disable_regset (regsets_info, regset);
5073 }
5074 else if (errno == ENODATA)
5075 {
5076 /* ENODATA may be returned if the regset is currently
5077 not "active". This can happen in normal operation,
5078 so suppress the warning in this case. */
5079 }
5080 else
5081 {
5082 char s[256];
5083 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5084 pid);
5085 perror (s);
5086 }
5087 }
5088 else
5089 {
5090 if (regset->type == GENERAL_REGS)
5091 saw_general_regs = 1;
5092 regset->store_function (regcache, buf);
5093 }
5094 free (buf);
5095 }
5096 if (saw_general_regs)
5097 return 0;
5098 else
5099 return 1;
5100}
5101
5102static int
5103regsets_store_inferior_registers (struct regsets_info *regsets_info,
5104 struct regcache *regcache)
5105{
5106 struct regset_info *regset;
5107 int saw_general_regs = 0;
5108 int pid;
5109 struct iovec iov;
5110
5111 pid = lwpid_of (current_thread);
5112 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5113 {
5114 void *buf, *data;
5115 int nt_type, res;
5116
5117 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5118 || regset->fill_function == NULL)
5119 continue;
5120
5121 buf = xmalloc (regset->size);
5122
5123 /* First fill the buffer with the current register set contents,
5124 in case there are any items in the kernel's regset that are
5125 not in gdbserver's regcache. */
5126
5127 nt_type = regset->nt_type;
5128 if (nt_type)
5129 {
5130 iov.iov_base = buf;
5131 iov.iov_len = regset->size;
5132 data = (void *) &iov;
5133 }
5134 else
5135 data = buf;
5136
5137#ifndef __sparc__
5138 res = ptrace (regset->get_request, pid,
5139 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5140#else
5141 res = ptrace (regset->get_request, pid, data, nt_type);
5142#endif
5143
5144 if (res == 0)
5145 {
5146 /* Then overlay our cached registers on that. */
5147 regset->fill_function (regcache, buf);
5148
5149 /* Only now do we write the register set. */
5150#ifndef __sparc__
5151 res = ptrace (regset->set_request, pid,
5152 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5153#else
5154 res = ptrace (regset->set_request, pid, data, nt_type);
5155#endif
5156 }
5157
5158 if (res < 0)
5159 {
5160 if (errno == EIO)
5161 {
5162 /* If we get EIO on a regset, do not try it again for
5163 this process mode. */
5164 disable_regset (regsets_info, regset);
5165 }
5166 else if (errno == ESRCH)
5167 {
5168 /* At this point, ESRCH should mean the process is
5169 already gone, in which case we simply ignore attempts
5170 to change its registers. See also the related
5171 comment in linux_resume_one_lwp. */
5172 free (buf);
5173 return 0;
5174 }
5175 else
5176 {
5177 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5178 }
5179 }
5180 else if (regset->type == GENERAL_REGS)
5181 saw_general_regs = 1;
5182 free (buf);
5183 }
5184 if (saw_general_regs)
5185 return 0;
5186 else
5187 return 1;
5188}
5189
5190#else /* !HAVE_LINUX_REGSETS */
5191
5192#define use_linux_regsets 0
5193#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5194#define regsets_store_inferior_registers(regsets_info, regcache) 1
5195
5196#endif
5197
5198/* Return 1 if register REGNO is supported by one of the regset ptrace
5199 calls or 0 if it has to be transferred individually. */
5200
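/* For example, with REGNO == 10 the test below looks at bit 2 (mask
   0x04) of byte 1 of the bitmap.  */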
5201static int
5202linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5203{
5204 unsigned char mask = 1 << (regno % 8);
5205 size_t index = regno / 8;
5206
5207 return (use_linux_regsets
5208 && (regs_info->regset_bitmap == NULL
5209 || (regs_info->regset_bitmap[index] & mask) != 0));
5210}
5211
5212#ifdef HAVE_LINUX_USRREGS
5213
5214int
5215register_addr (const struct usrregs_info *usrregs, int regnum)
5216{
5217 int addr;
5218
5219 if (regnum < 0 || regnum >= usrregs->num_regs)
5220 error ("Invalid register number %d.", regnum);
5221
5222 addr = usrregs->regmap[regnum];
5223
5224 return addr;
5225}
5226
5227/* Fetch one register. */
5228static void
5229fetch_register (const struct usrregs_info *usrregs,
5230 struct regcache *regcache, int regno)
5231{
5232 CORE_ADDR regaddr;
5233 int i, size;
5234 char *buf;
5235 int pid;
5236
5237 if (regno >= usrregs->num_regs)
5238 return;
5239 if ((*the_low_target.cannot_fetch_register) (regno))
5240 return;
5241
5242 regaddr = register_addr (usrregs, regno);
5243 if (regaddr == -1)
5244 return;
5245
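  /* Round the register size up to a multiple of the ptrace transfer
     unit; e.g., a 10-byte register with 8-byte PTRACE_XFER_TYPE
     words is fetched into a 16-byte buffer.  */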
5246 size = ((register_size (regcache->tdesc, regno)
5247 + sizeof (PTRACE_XFER_TYPE) - 1)
5248 & -sizeof (PTRACE_XFER_TYPE));
5249 buf = (char *) alloca (size);
5250
5251 pid = lwpid_of (current_thread);
5252 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5253 {
5254 errno = 0;
5255 *(PTRACE_XFER_TYPE *) (buf + i) =
5256 ptrace (PTRACE_PEEKUSER, pid,
5257 /* Coerce to a uintptr_t first to avoid potential gcc warning
5258 of coercing an 8 byte integer to a 4 byte pointer. */
5259 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5260 regaddr += sizeof (PTRACE_XFER_TYPE);
5261 if (errno != 0)
5262 error ("reading register %d: %s", regno, strerror (errno));
5263 }
5264
5265 if (the_low_target.supply_ptrace_register)
5266 the_low_target.supply_ptrace_register (regcache, regno, buf);
5267 else
5268 supply_register (regcache, regno, buf);
5269}
5270
5271/* Store one register. */
5272static void
5273store_register (const struct usrregs_info *usrregs,
5274 struct regcache *regcache, int regno)
5275{
5276 CORE_ADDR regaddr;
5277 int i, size;
5278 char *buf;
5279 int pid;
5280
5281 if (regno >= usrregs->num_regs)
5282 return;
5283 if ((*the_low_target.cannot_store_register) (regno))
5284 return;
5285
5286 regaddr = register_addr (usrregs, regno);
5287 if (regaddr == -1)
5288 return;
5289
5290 size = ((register_size (regcache->tdesc, regno)
5291 + sizeof (PTRACE_XFER_TYPE) - 1)
5292 & -sizeof (PTRACE_XFER_TYPE));
5293 buf = (char *) alloca (size);
5294 memset (buf, 0, size);
5295
5296 if (the_low_target.collect_ptrace_register)
5297 the_low_target.collect_ptrace_register (regcache, regno, buf);
5298 else
5299 collect_register (regcache, regno, buf);
5300
5301 pid = lwpid_of (current_thread);
5302 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5303 {
5304 errno = 0;
5305 ptrace (PTRACE_POKEUSER, pid,
5306 /* Coerce to a uintptr_t first to avoid potential gcc warning
5307 about coercing an 8 byte integer to a 4 byte pointer. */
5308 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5309 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5310 if (errno != 0)
5311 {
5312 /* At this point, ESRCH should mean the process is
5313 already gone, in which case we simply ignore attempts
5314 to change its registers. See also the related
5315 comment in linux_resume_one_lwp. */
5316 if (errno == ESRCH)
5317 return;
5318
5319 if ((*the_low_target.cannot_store_register) (regno) == 0)
5320 error ("writing register %d: %s", regno, strerror (errno));
5321 }
5322 regaddr += sizeof (PTRACE_XFER_TYPE);
5323 }
5324}
5325
5326/* Fetch all registers, or just one, from the child process.
5327 If REGNO is -1, do this for all registers, skipping any that are
5328 assumed to have been retrieved by regsets_fetch_inferior_registers,
5329 unless ALL is non-zero.
5330 Otherwise, REGNO specifies which register (so we can save time). */
5331static void
5332usr_fetch_inferior_registers (const struct regs_info *regs_info,
5333 struct regcache *regcache, int regno, int all)
5334{
5335 struct usrregs_info *usr = regs_info->usrregs;
5336
5337 if (regno == -1)
5338 {
5339 for (regno = 0; regno < usr->num_regs; regno++)
5340 if (all || !linux_register_in_regsets (regs_info, regno))
5341 fetch_register (usr, regcache, regno);
5342 }
5343 else
5344 fetch_register (usr, regcache, regno);
5345}
5346
5347/* Store our register values back into the inferior.
5348 If REGNO is -1, do this for all registers, skipping any that are
5349 assumed to have been saved by regsets_store_inferior_registers,
5350 unless ALL is non-zero.
5351 Otherwise, REGNO specifies which register (so we can save time). */
5352static void
5353usr_store_inferior_registers (const struct regs_info *regs_info,
5354 struct regcache *regcache, int regno, int all)
5355{
5356 struct usrregs_info *usr = regs_info->usrregs;
5357
5358 if (regno == -1)
5359 {
5360 for (regno = 0; regno < usr->num_regs; regno++)
5361 if (all || !linux_register_in_regsets (regs_info, regno))
5362 store_register (usr, regcache, regno);
5363 }
5364 else
5365 store_register (usr, regcache, regno);
5366}
5367
5368#else /* !HAVE_LINUX_USRREGS */
5369
5370#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5371#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5372
5373#endif
5374
5375
5376void
5377linux_fetch_registers (struct regcache *regcache, int regno)
5378{
5379 int use_regsets;
5380 int all = 0;
5381 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5382
5383 if (regno == -1)
5384 {
5385 if (the_low_target.fetch_register != NULL
5386 && regs_info->usrregs != NULL)
5387 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5388 (*the_low_target.fetch_register) (regcache, regno);
5389
5390 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5391 if (regs_info->usrregs != NULL)
5392 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5393 }
5394 else
5395 {
5396 if (the_low_target.fetch_register != NULL
5397 && (*the_low_target.fetch_register) (regcache, regno))
5398 return;
5399
5400 use_regsets = linux_register_in_regsets (regs_info, regno);
5401 if (use_regsets)
5402 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5403 regcache);
5404 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5405 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5406 }
5407}
5408
5409void
5410linux_store_registers (struct regcache *regcache, int regno)
5411{
5412 int use_regsets;
5413 int all = 0;
5414 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5415
5416 if (regno == -1)
5417 {
5418 all = regsets_store_inferior_registers (regs_info->regsets_info,
5419 regcache);
5420 if (regs_info->usrregs != NULL)
5421 usr_store_inferior_registers (regs_info, regcache, regno, all);
5422 }
5423 else
5424 {
5425 use_regsets = linux_register_in_regsets (regs_info, regno);
5426 if (use_regsets)
5427 all = regsets_store_inferior_registers (regs_info->regsets_info,
5428 regcache);
5429 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5430 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5431 }
5432}
5433
5434
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns zero on success,
   or the value of errno on failure.  */
5437
5438static int
5439linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5440{
5441 int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
5447 int ret;
5448 int fd;
5449
  /* Try using /proc first.  Don't bother for transfers shorter than
     three words; the ptrace fallback below handles those fine.  */
5451 if (len >= 3 * sizeof (long))
5452 {
5453 int bytes;
5454
5455 /* We could keep this file open and cache it - possibly one per
5456 thread. That requires some juggling, but is even faster. */
5457 sprintf (filename, "/proc/%d/mem", pid);
5458 fd = open (filename, O_RDONLY | O_LARGEFILE);
5459 if (fd == -1)
5460 goto no_proc;
5461
5462 /* If pread64 is available, use it. It's faster if the kernel
5463 supports it (only one syscall), and it's 64-bit safe even on
5464 32-bit platforms (for instance, SPARC debugging a SPARC64
5465 application). */
5466#ifdef HAVE_PREAD64
5467 bytes = pread64 (fd, myaddr, len, memaddr);
5468#else
5469 bytes = -1;
5470 if (lseek (fd, memaddr, SEEK_SET) != -1)
5471 bytes = read (fd, myaddr, len);
5472#endif
5473
5474 close (fd);
5475 if (bytes == len)
5476 return 0;
5477
5478 /* Some data was read, we'll try to get the rest with ptrace. */
5479 if (bytes > 0)
5480 {
5481 memaddr += bytes;
5482 myaddr += bytes;
5483 len -= bytes;
5484 }
5485 }
5486
5487 no_proc:
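  /* Fall back to word-at-a-time ptrace transfers.  For example,
     reading 5 bytes at address 0x1006 with 8-byte words yields
     ADDR == 0x1000 and COUNT == 2; the copy at the end then skips
     the first 6 bytes of the buffer.  */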
5488 /* Round starting address down to longword boundary. */
5489 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5490 /* Round ending address up; get number of longwords that makes. */
5491 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5492 / sizeof (PTRACE_XFER_TYPE));
5493 /* Allocate buffer of that many longwords. */
5494 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5495
  /* Read all the longwords.  */
5497 errno = 0;
5498 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5499 {
5500 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5501 about coercing an 8 byte integer to a 4 byte pointer. */
5502 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5503 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5504 (PTRACE_TYPE_ARG4) 0);
5505 if (errno)
5506 break;
5507 }
5508 ret = errno;
5509
5510 /* Copy appropriate bytes out of the buffer. */
5511 if (i > 0)
5512 {
5513 i *= sizeof (PTRACE_XFER_TYPE);
5514 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5515 memcpy (myaddr,
5516 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5517 i < len ? i : len);
5518 }
5519
5520 return ret;
5521}
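
/* A minimal sketch (disabled; proc_mem_read_sketch is an invented name,
   not part of gdbserver) of the /proc fast path used above: one pread
   from /proc/PID/mem replaces a loop of PTRACE_PEEKTEXT calls, and a
   short read simply tells the caller to fall back to ptrace.  */
#if 0
static int
proc_mem_read_sketch (int pid, CORE_ADDR memaddr,
                      unsigned char *myaddr, int len)
{
  char filename[64];
  int fd;
  ssize_t bytes;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

  /* pread combines the lseek and the read in one syscall; the real
     code above prefers pread64 when available so that 64-bit offsets
     survive on 32-bit hosts.  */
  bytes = pread (fd, myaddr, len, (off_t) memaddr);
  close (fd);
  return bytes == len ? 0 : -1;
}
#endif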
5522
5523/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5524 memory at MEMADDR. On failure (cannot write to the inferior)
5525 returns the value of errno. Always succeeds if LEN is zero. */
5526
5527static int
5528linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5529{
5530 register int i;
5531 /* Round starting address down to longword boundary. */
5532 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5533 /* Round ending address up; get number of longwords that makes. */
5534 register int count
5535 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5536 / sizeof (PTRACE_XFER_TYPE);
5537
5538 /* Allocate buffer of that many longwords. */
5539 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5540
5541 int pid = lwpid_of (current_thread);
5542
5543 if (len == 0)
5544 {
5545 /* Zero length write always succeeds. */
5546 return 0;
5547 }
5548
5549 if (debug_threads)
5550 {
5551 /* Dump up to four bytes. */
5552 char str[4 * 2 + 1];
5553 char *p = str;
5554 int dump = len < 4 ? len : 4;
5555
5556 for (i = 0; i < dump; i++)
5557 {
5558 sprintf (p, "%02x", myaddr[i]);
5559 p += 2;
5560 }
5561 *p = '\0';
5562
5563 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5564 str, (long) memaddr, pid);
5565 }
5566
5567 /* Fill start and end extra bytes of buffer with existing memory data. */
5568
5569 errno = 0;
5570 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5571 about coercing an 8 byte integer to a 4 byte pointer. */
5572 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5573 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5574 (PTRACE_TYPE_ARG4) 0);
5575 if (errno)
5576 return errno;
5577
5578 if (count > 1)
5579 {
5580 errno = 0;
5581 buffer[count - 1]
5582 = ptrace (PTRACE_PEEKTEXT, pid,
5583 /* Coerce to a uintptr_t first to avoid potential gcc warning
5584 about coercing an 8 byte integer to a 4 byte pointer. */
5585 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5586 * sizeof (PTRACE_XFER_TYPE)),
5587 (PTRACE_TYPE_ARG4) 0);
5588 if (errno)
5589 return errno;
5590 }
5591
5592 /* Copy data to be written over corresponding part of buffer. */
5593
5594 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5595 myaddr, len);
5596
5597 /* Write the entire buffer. */
5598
5599 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5600 {
5601 errno = 0;
5602 ptrace (PTRACE_POKETEXT, pid,
5603 /* Coerce to a uintptr_t first to avoid potential gcc warning
5604 about coercing an 8 byte integer to a 4 byte pointer. */
5605 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5606 (PTRACE_TYPE_ARG4) buffer[i]);
5607 if (errno)
5608 return errno;
5609 }
5610
5611 return 0;
5612}
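
/* Worked example of the round-trip above, assuming
   sizeof (PTRACE_XFER_TYPE) == 8: for memaddr == 0x1003 and len == 7,
   addr rounds down to 0x1000, count == ((0x100a - 0x1000) + 7) / 8 == 2,
   so both words are peeked, the seven payload bytes are spliced in at
   byte offset 3 (memaddr & 7), and both words are poked back.  The
   numbers are illustrative only.  */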
5613
5614static void
5615linux_look_up_symbols (void)
5616{
5617#ifdef USE_THREAD_DB
5618 struct process_info *proc = current_process ();
5619
5620 if (proc->priv->thread_db != NULL)
5621 return;
5622
5623 thread_db_init ();
5624#endif
5625}
5626
5627static void
5628linux_request_interrupt (void)
5629{
5630 extern unsigned long signal_pid;
5631
5632 /* Send a SIGINT to the process group. This acts just like the user
5633 typed a ^C on the controlling terminal. */
5634 kill (-signal_pid, SIGINT);
5635}
5636
5637/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5638 to debugger memory starting at MYADDR. */
5639
5640static int
5641linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5642{
5643 char filename[PATH_MAX];
5644 int fd, n;
5645 int pid = lwpid_of (current_thread);
5646
5647 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5648
5649 fd = open (filename, O_RDONLY);
5650 if (fd < 0)
5651 return -1;
5652
5653 if (offset != (CORE_ADDR) 0
5654 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5655 n = -1;
5656 else
5657 n = read (fd, myaddr, len);
5658
5659 close (fd);
5660
5661 return n;
5662}
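
/* A sketch (disabled; auxv_lookup_sketch is an invented helper) of how
   the raw bytes returned above are laid out: an array of (a_type,
   a_un.a_val) pairs terminated by an AT_NULL entry, here assuming a
   64-bit inferior.  */
#if 0
static unsigned long
auxv_lookup_sketch (const unsigned char *buf, size_t len,
                    unsigned long type)
{
  const Elf64_auxv_t *aux = (const Elf64_auxv_t *) buf;
  const Elf64_auxv_t *end = (const Elf64_auxv_t *) (buf + len);

  for (; aux < end && aux->a_type != AT_NULL; aux++)
    if (aux->a_type == type)
      return aux->a_un.a_val;	/* E.g. TYPE == AT_PHDR.  */
  return 0;
}
#endif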
5663
5664/* These breakpoint and watchpoint related wrapper functions simply
5665 pass on the function call if the target has registered a
5666 corresponding function. */
5667
5668static int
5669linux_supports_z_point_type (char z_type)
5670{
5671 return (the_low_target.supports_z_point_type != NULL
5672 && the_low_target.supports_z_point_type (z_type));
5673}
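
/* For reference, the type characters GDB probes with the Z/z packets
   are '0' (software breakpoint), '1' (hardware breakpoint), '2' (write
   watchpoint), '3' (read watchpoint) and '4' (access watchpoint).  */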
5674
5675static int
5676linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5677 int size, struct raw_breakpoint *bp)
5678{
5679 if (type == raw_bkpt_type_sw)
5680 return insert_memory_breakpoint (bp);
5681 else if (the_low_target.insert_point != NULL)
5682 return the_low_target.insert_point (type, addr, size, bp);
5683 else
5684 /* Unsupported (see target.h). */
5685 return 1;
5686}
5687
5688static int
5689linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5690 int size, struct raw_breakpoint *bp)
5691{
5692 if (type == raw_bkpt_type_sw)
5693 return remove_memory_breakpoint (bp);
5694 else if (the_low_target.remove_point != NULL)
5695 return the_low_target.remove_point (type, addr, size, bp);
5696 else
5697 /* Unsupported (see target.h). */
5698 return 1;
5699}
5700
5701/* Implement the to_stopped_by_sw_breakpoint target_ops
5702 method. */
5703
5704static int
5705linux_stopped_by_sw_breakpoint (void)
5706{
5707 struct lwp_info *lwp = get_thread_lwp (current_thread);
5708
5709 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5710}
5711
5712/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5713 method. */
5714
5715static int
5716linux_supports_stopped_by_sw_breakpoint (void)
5717{
5718 return USE_SIGTRAP_SIGINFO;
5719}
5720
5721/* Implement the to_stopped_by_hw_breakpoint target_ops
5722 method. */
5723
5724static int
5725linux_stopped_by_hw_breakpoint (void)
5726{
5727 struct lwp_info *lwp = get_thread_lwp (current_thread);
5728
5729 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5730}
5731
5732/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5733 method. */
5734
5735static int
5736linux_supports_stopped_by_hw_breakpoint (void)
5737{
5738 return USE_SIGTRAP_SIGINFO;
5739}
5740
5741/* Implement the supports_hardware_single_step target_ops method. */
5742
5743static int
5744linux_supports_hardware_single_step (void)
5745{
5746 return can_hardware_single_step ();
5747}
5748
5749static int
5750linux_supports_software_single_step (void)
5751{
5752 return can_software_single_step ();
5753}
5754
5755static int
5756linux_stopped_by_watchpoint (void)
5757{
5758 struct lwp_info *lwp = get_thread_lwp (current_thread);
5759
5760 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5761}
5762
5763static CORE_ADDR
5764linux_stopped_data_address (void)
5765{
5766 struct lwp_info *lwp = get_thread_lwp (current_thread);
5767
5768 return lwp->stopped_data_address;
5769}
5770
5771#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5772 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5773 && defined(PT_TEXT_END_ADDR)
5774
5775/* This is only used for targets that define PT_TEXT_ADDR,
5776 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5777 target presumably has other ways of acquiring this information,
5778 such as loadmaps. */
5779
5780/* Under uClinux, programs are loaded at non-zero offsets, which we need
5781 to tell gdb about. */
5782
5783static int
5784linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5785{
5786 unsigned long text, text_end, data;
5787 int pid = lwpid_of (current_thread);
5788
5789 errno = 0;
5790
5791 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5792 (PTRACE_TYPE_ARG4) 0);
5793 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5794 (PTRACE_TYPE_ARG4) 0);
5795 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5796 (PTRACE_TYPE_ARG4) 0);
5797
5798 if (errno == 0)
5799 {
5800 /* Both text and data offsets produced at compile-time (and so
5801 used by gdb) are relative to the beginning of the program,
5802 with the data segment immediately following the text segment.
5803 However, the actual runtime layout in memory may put the data
5804 somewhere else, so when we send gdb a data base-address, we
5805 use the real data base address and subtract the compile-time
5806 data base-address from it (which is just the length of the
5807 text segment). BSS immediately follows data in both
5808 cases. */
5809 *text_p = text;
5810 *data_p = data - (text_end - text);
5811
5812 return 1;
5813 }
5814 return 0;
5815}
5816#endif
5817
5818static int
5819linux_qxfer_osdata (const char *annex,
5820 unsigned char *readbuf, unsigned const char *writebuf,
5821 CORE_ADDR offset, int len)
5822{
5823 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5824}
5825
5826/* Convert a native/host siginfo object into/from the siginfo in the
5827 layout of the inferior's architecture. */
5828
5829static void
5830siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5831{
5832 int done = 0;
5833
5834 if (the_low_target.siginfo_fixup != NULL)
5835 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5836
5837 /* If there was no callback, or the callback didn't do anything,
5838 then just do a straight memcpy. */
5839 if (!done)
5840 {
5841 if (direction == 1)
5842 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5843 else
5844 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5845 }
5846}
5847
5848static int
5849linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5850 unsigned const char *writebuf, CORE_ADDR offset, int len)
5851{
5852 int pid;
5853 siginfo_t siginfo;
5854 char inf_siginfo[sizeof (siginfo_t)];
5855
5856 if (current_thread == NULL)
5857 return -1;
5858
5859 pid = lwpid_of (current_thread);
5860
5861 if (debug_threads)
5862 debug_printf ("%s siginfo for lwp %d.\n",
5863 readbuf != NULL ? "Reading" : "Writing",
5864 pid);
5865
5866 if (offset >= sizeof (siginfo))
5867 return -1;
5868
5869 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5870 return -1;
5871
5872 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5873 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5874 inferior with a 64-bit GDBSERVER should look the same as debugging it
5875 with a 32-bit GDBSERVER, we need to convert it. */
5876 siginfo_fixup (&siginfo, inf_siginfo, 0);
5877
5878 if (offset + len > sizeof (siginfo))
5879 len = sizeof (siginfo) - offset;
5880
5881 if (readbuf != NULL)
5882 memcpy (readbuf, inf_siginfo + offset, len);
5883 else
5884 {
5885 memcpy (inf_siginfo + offset, writebuf, len);
5886
5887 /* Convert back to ptrace layout before flushing it out. */
5888 siginfo_fixup (&siginfo, inf_siginfo, 1);
5889
5890 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5891 return -1;
5892 }
5893
5894 return len;
5895}
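
/* For reference, this implements the qXfer:siginfo:read and
   qXfer:siginfo:write packets; e.g. GDB's "print $_siginfo" arrives
   here as a read with an empty annex, OFFSET 0 and READBUF non-NULL.  */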
5896
5897/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5898 it lets us notice when children change state; and it acts as the
5899 handler for the sigsuspend in my_waitpid. */
5900
5901static void
5902sigchld_handler (int signo)
5903{
5904 int old_errno = errno;
5905
5906 if (debug_threads)
5907 {
5908 do
5909 {
5910 /* fprintf is not async-signal-safe, so call write
5911 directly. */
5912 if (write (2, "sigchld_handler\n",
5913 sizeof ("sigchld_handler\n") - 1) < 0)
5914 break; /* just ignore */
5915 } while (0);
5916 }
5917
5918 if (target_is_async_p ())
5919 async_file_mark (); /* trigger a linux_wait */
5920
5921 errno = old_errno;
5922}
5923
5924static int
5925linux_supports_non_stop (void)
5926{
5927 return 1;
5928}
5929
5930static int
5931linux_async (int enable)
5932{
5933 int previous = target_is_async_p ();
5934
5935 if (debug_threads)
5936 debug_printf ("linux_async (%d), previous=%d\n",
5937 enable, previous);
5938
5939 if (previous != enable)
5940 {
5941 sigset_t mask;
5942 sigemptyset (&mask);
5943 sigaddset (&mask, SIGCHLD);
5944
5945 sigprocmask (SIG_BLOCK, &mask, NULL);
5946
5947 if (enable)
5948 {
5949 if (pipe (linux_event_pipe) == -1)
5950 {
5951 linux_event_pipe[0] = -1;
5952 linux_event_pipe[1] = -1;
5953 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5954
5955 warning ("creating event pipe failed.");
5956 return previous;
5957 }
5958
5959 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5960 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5961
5962 /* Register the event loop handler. */
5963 add_file_handler (linux_event_pipe[0],
5964 handle_target_event, NULL);
5965
5966 /* Always trigger a linux_wait. */
5967 async_file_mark ();
5968 }
5969 else
5970 {
5971 delete_file_handler (linux_event_pipe[0]);
5972
5973 close (linux_event_pipe[0]);
5974 close (linux_event_pipe[1]);
5975 linux_event_pipe[0] = -1;
5976 linux_event_pipe[1] = -1;
5977 }
5978
5979 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5980 }
5981
5982 return previous;
5983}
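
/* The pipe set up above is the classic self-pipe trick: the SIGCHLD
   handler does an async-signal-safe write to linux_event_pipe[1] (via
   async_file_mark), which wakes the event loop waiting on
   linux_event_pipe[0] and gets handle_target_event run outside of
   signal context.  */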
5984
5985static int
5986linux_start_non_stop (int nonstop)
5987{
5988 /* Register or unregister from event-loop accordingly. */
5989 linux_async (nonstop);
5990
5991 if (target_is_async_p () != (nonstop != 0))
5992 return -1;
5993
5994 return 0;
5995}
5996
5997static int
5998linux_supports_multi_process (void)
5999{
6000 return 1;
6001}
6002
6003/* Check if fork events are supported. */
6004
6005static int
6006linux_supports_fork_events (void)
6007{
6008 return linux_supports_tracefork ();
6009}
6010
6011/* Check if vfork events are supported. */
6012
6013static int
6014linux_supports_vfork_events (void)
6015{
6016 return linux_supports_tracefork ();
6017}
6018
6019/* Check if exec events are supported. */
6020
6021static int
6022linux_supports_exec_events (void)
6023{
6024 return linux_supports_traceexec ();
6025}
6026
6027/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6028 options for the specified lwp. */
6029
6030static int
6031reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6032 void *args)
6033{
6034 struct thread_info *thread = (struct thread_info *) entry;
6035 struct lwp_info *lwp = get_thread_lwp (thread);
6036
6037 if (!lwp->stopped)
6038 {
6039 /* Stop the lwp so we can modify its ptrace options. */
6040 lwp->must_set_ptrace_flags = 1;
6041 linux_stop_lwp (lwp);
6042 }
6043 else
6044 {
6045 /* Already stopped; go ahead and set the ptrace options. */
6046 struct process_info *proc = find_process_pid (pid_of (thread));
6047 int options = linux_low_ptrace_options (proc->attached);
6048
6049 linux_enable_event_reporting (lwpid_of (thread), options);
6050 lwp->must_set_ptrace_flags = 0;
6051 }
6052
6053 return 0;
6054}
6055
6056/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6057 ptrace flags for all inferiors. This is in case the new GDB connection
6058 doesn't support the same set of events that the previous one did. */
6059
6060static void
6061linux_handle_new_gdb_connection (void)
6062{
6063 pid_t pid;
6064
6065 /* Request that all the lwps reset their ptrace options. */
6066 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6067}
6068
6069static int
6070linux_supports_disable_randomization (void)
6071{
6072#ifdef HAVE_PERSONALITY
6073 return 1;
6074#else
6075 return 0;
6076#endif
6077}
6078
6079static int
6080linux_supports_agent (void)
6081{
6082 return 1;
6083}
6084
6085static int
6086linux_supports_range_stepping (void)
6087{
6088 if (*the_low_target.supports_range_stepping == NULL)
6089 return 0;
6090
6091 return (*the_low_target.supports_range_stepping) ();
6092}
6093
6094/* Enumerate spufs IDs for process PID. */
6095static int
6096spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6097{
6098 int pos = 0;
6099 int written = 0;
6100 char path[128];
6101 DIR *dir;
6102 struct dirent *entry;
6103
6104 sprintf (path, "/proc/%ld/fd", pid);
6105 dir = opendir (path);
6106 if (!dir)
6107 return -1;
6108
6109 rewinddir (dir);
6110 while ((entry = readdir (dir)) != NULL)
6111 {
6112 struct stat st;
6113 struct statfs stfs;
6114 int fd;
6115
6116 fd = atoi (entry->d_name);
6117 if (!fd)
6118 continue;
6119
6120 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6121 if (stat (path, &st) != 0)
6122 continue;
6123 if (!S_ISDIR (st.st_mode))
6124 continue;
6125
6126 if (statfs (path, &stfs) != 0)
6127 continue;
6128 if (stfs.f_type != SPUFS_MAGIC)
6129 continue;
6130
6131 if (pos >= offset && pos + 4 <= offset + len)
6132 {
6133 *(unsigned int *)(buf + pos - offset) = fd;
6134 written += 4;
6135 }
6136 pos += 4;
6137 }
6138
6139 closedir (dir);
6140 return written;
6141}
6142
6143/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6144 object type, using the /proc file system. */
6145static int
6146linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6147 unsigned const char *writebuf,
6148 CORE_ADDR offset, int len)
6149{
6150 long pid = lwpid_of (current_thread);
6151 char buf[128];
6152 int fd = 0;
6153 int ret = 0;
6154
6155 if (!writebuf && !readbuf)
6156 return -1;
6157
6158 if (!*annex)
6159 {
6160 if (!readbuf)
6161 return -1;
6162 else
6163 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6164 }
6165
6166 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6167 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6168 if (fd <= 0)
6169 return -1;
6170
6171 if (offset != 0
6172 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6173 {
6174 close (fd);
6175 return 0;
6176 }
6177
6178 if (writebuf)
6179 ret = write (fd, writebuf, (size_t) len);
6180 else
6181 ret = read (fd, readbuf, (size_t) len);
6182
6183 close (fd);
6184 return ret;
6185}
6186
6187#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6188struct target_loadseg
6189{
6190 /* Core address to which the segment is mapped. */
6191 Elf32_Addr addr;
6192 /* VMA recorded in the program header. */
6193 Elf32_Addr p_vaddr;
6194 /* Size of this segment in memory. */
6195 Elf32_Word p_memsz;
6196};
6197
6198# if defined PT_GETDSBT
6199struct target_loadmap
6200{
6201 /* Protocol version number, must be zero. */
6202 Elf32_Word version;
6203 /* Pointer to the DSBT table, its size, and the DSBT index. */
6204 unsigned *dsbt_table;
6205 unsigned dsbt_size, dsbt_index;
6206 /* Number of segments in this map. */
6207 Elf32_Word nsegs;
6208 /* The actual memory map. */
6209 struct target_loadseg segs[/*nsegs*/];
6210};
6211# define LINUX_LOADMAP PT_GETDSBT
6212# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6213# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6214# else
6215struct target_loadmap
6216{
6217 /* Protocol version number, must be zero. */
6218 Elf32_Half version;
6219 /* Number of segments in this map. */
6220 Elf32_Half nsegs;
6221 /* The actual memory map. */
6222 struct target_loadseg segs[/*nsegs*/];
6223};
6224# define LINUX_LOADMAP PTRACE_GETFDPIC
6225# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6226# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6227# endif
6228
6229static int
6230linux_read_loadmap (const char *annex, CORE_ADDR offset,
6231 unsigned char *myaddr, unsigned int len)
6232{
6233 int pid = lwpid_of (current_thread);
6234 int addr = -1;
6235 struct target_loadmap *data = NULL;
6236 unsigned int actual_length, copy_length;
6237
6238 if (strcmp (annex, "exec") == 0)
6239 addr = (int) LINUX_LOADMAP_EXEC;
6240 else if (strcmp (annex, "interp") == 0)
6241 addr = (int) LINUX_LOADMAP_INTERP;
6242 else
6243 return -1;
6244
6245 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6246 return -1;
6247
6248 if (data == NULL)
6249 return -1;
6250
6251 actual_length = sizeof (struct target_loadmap)
6252 + sizeof (struct target_loadseg) * data->nsegs;
6253
6254 if (offset < 0 || offset > actual_length)
6255 return -1;
6256
6257 copy_length = actual_length - offset < len ? actual_length - offset : len;
6258 memcpy (myaddr, (char *) data + offset, copy_length);
6259 return copy_length;
6260}
6261#else
6262# define linux_read_loadmap NULL
6263#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6264
6265static void
6266linux_process_qsupported (char **features, int count)
6267{
6268 if (the_low_target.process_qsupported != NULL)
6269 the_low_target.process_qsupported (features, count);
6270}
6271
6272static int
6273linux_supports_tracepoints (void)
6274{
6275 if (*the_low_target.supports_tracepoints == NULL)
6276 return 0;
6277
6278 return (*the_low_target.supports_tracepoints) ();
6279}
6280
6281static CORE_ADDR
6282linux_read_pc (struct regcache *regcache)
6283{
6284 if (the_low_target.get_pc == NULL)
6285 return 0;
6286
6287 return (*the_low_target.get_pc) (regcache);
6288}
6289
6290static void
6291linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6292{
6293 gdb_assert (the_low_target.set_pc != NULL);
6294
6295 (*the_low_target.set_pc) (regcache, pc);
6296}
6297
6298static int
6299linux_thread_stopped (struct thread_info *thread)
6300{
6301 return get_thread_lwp (thread)->stopped;
6302}
6303
6304/* This exposes stop-all-threads functionality to other modules. */
6305
6306static void
6307linux_pause_all (int freeze)
6308{
6309 stop_all_lwps (freeze, NULL);
6310}
6311
6312/* This exposes unstop-all-threads functionality to other gdbserver
6313 modules. */
6314
6315static void
6316linux_unpause_all (int unfreeze)
6317{
6318 unstop_all_lwps (unfreeze, NULL);
6319}
6320
6321static int
6322linux_prepare_to_access_memory (void)
6323{
6324 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6325 running LWP. */
6326 if (non_stop)
6327 linux_pause_all (1);
6328 return 0;
6329}
6330
6331static void
6332linux_done_accessing_memory (void)
6333{
6334 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6335 running LWP. */
6336 if (non_stop)
6337 linux_unpause_all (1);
6338}
6339
6340static int
6341linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6342 CORE_ADDR collector,
6343 CORE_ADDR lockaddr,
6344 ULONGEST orig_size,
6345 CORE_ADDR *jump_entry,
6346 CORE_ADDR *trampoline,
6347 ULONGEST *trampoline_size,
6348 unsigned char *jjump_pad_insn,
6349 ULONGEST *jjump_pad_insn_size,
6350 CORE_ADDR *adjusted_insn_addr,
6351 CORE_ADDR *adjusted_insn_addr_end,
6352 char *err)
6353{
6354 return (*the_low_target.install_fast_tracepoint_jump_pad)
6355 (tpoint, tpaddr, collector, lockaddr, orig_size,
6356 jump_entry, trampoline, trampoline_size,
6357 jjump_pad_insn, jjump_pad_insn_size,
6358 adjusted_insn_addr, adjusted_insn_addr_end,
6359 err);
6360}
6361
6362static struct emit_ops *
6363linux_emit_ops (void)
6364{
6365 if (the_low_target.emit_ops != NULL)
6366 return (*the_low_target.emit_ops) ();
6367 else
6368 return NULL;
6369}
6370
6371static int
6372linux_get_min_fast_tracepoint_insn_len (void)
6373{
6374 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6375}
6376
6377/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6378
6379static int
6380get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6381 CORE_ADDR *phdr_memaddr, int *num_phdr)
6382{
6383 char filename[PATH_MAX];
6384 int fd;
6385 const int auxv_size = is_elf64
6386 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6387 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6388
6389 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6390
6391 fd = open (filename, O_RDONLY);
6392 if (fd < 0)
6393 return 1;
6394
6395 *phdr_memaddr = 0;
6396 *num_phdr = 0;
6397 while (read (fd, buf, auxv_size) == auxv_size
6398 && (*phdr_memaddr == 0 || *num_phdr == 0))
6399 {
6400 if (is_elf64)
6401 {
6402 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6403
6404 switch (aux->a_type)
6405 {
6406 case AT_PHDR:
6407 *phdr_memaddr = aux->a_un.a_val;
6408 break;
6409 case AT_PHNUM:
6410 *num_phdr = aux->a_un.a_val;
6411 break;
6412 }
6413 }
6414 else
6415 {
6416 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6417
6418 switch (aux->a_type)
6419 {
6420 case AT_PHDR:
6421 *phdr_memaddr = aux->a_un.a_val;
6422 break;
6423 case AT_PHNUM:
6424 *num_phdr = aux->a_un.a_val;
6425 break;
6426 }
6427 }
6428 }
6429
6430 close (fd);
6431
6432 if (*phdr_memaddr == 0 || *num_phdr == 0)
6433 {
6434 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6435 "phdr_memaddr = %ld, phdr_num = %d",
6436 (long) *phdr_memaddr, *num_phdr);
6437 return 2;
6438 }
6439
6440 return 0;
6441}
6442
6443/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6444
6445static CORE_ADDR
6446get_dynamic (const int pid, const int is_elf64)
6447{
6448 CORE_ADDR phdr_memaddr, relocation;
6449 int num_phdr, i;
6450 unsigned char *phdr_buf;
6451 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6452
6453 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6454 return 0;
6455
6456 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6457 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6458
6459 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6460 return 0;
6461
6462 /* Compute relocation: it is expected to be 0 for "regular" executables,
6463 non-zero for PIE ones. */
6464 relocation = -1;
6465 for (i = 0; relocation == -1 && i < num_phdr; i++)
6466 if (is_elf64)
6467 {
6468 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6469
6470 if (p->p_type == PT_PHDR)
6471 relocation = phdr_memaddr - p->p_vaddr;
6472 }
6473 else
6474 {
6475 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6476
6477 if (p->p_type == PT_PHDR)
6478 relocation = phdr_memaddr - p->p_vaddr;
6479 }
6480
6481 if (relocation == -1)
6482 {
6483 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6484 all real-world executables, including PIE executables, always have
6485 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6486 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6487 provides DT_DEBUG anyway (fpc binaries are statically linked).
6488
6489 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6490
6491 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6492
6493 return 0;
6494 }
6495
6496 for (i = 0; i < num_phdr; i++)
6497 {
6498 if (is_elf64)
6499 {
6500 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6501
6502 if (p->p_type == PT_DYNAMIC)
6503 return p->p_vaddr + relocation;
6504 }
6505 else
6506 {
6507 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6508
6509 if (p->p_type == PT_DYNAMIC)
6510 return p->p_vaddr + relocation;
6511 }
6512 }
6513
6514 return 0;
6515}
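
/* Worked example of the relocation computed above: if the program
   headers record PT_PHDR at p_vaddr 0x40 but the auxiliary vector
   reports them mapped at 0x555555554040, then relocation is
   0x555555554000, and a PT_DYNAMIC segment with p_vaddr 0x2e00 puts
   &_DYNAMIC at 0x555555556e00.  The addresses are illustrative.  */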
6516
6517/* Return &_r_debug in the inferior, or -1 if not present. Return value
6518 can be 0 if the inferior does not yet have the library list initialized.
6519 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6520 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6521
6522static CORE_ADDR
6523get_r_debug (const int pid, const int is_elf64)
6524{
6525 CORE_ADDR dynamic_memaddr;
6526 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6527 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6528 CORE_ADDR map = -1;
6529
6530 dynamic_memaddr = get_dynamic (pid, is_elf64);
6531 if (dynamic_memaddr == 0)
6532 return map;
6533
6534 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6535 {
6536 if (is_elf64)
6537 {
6538 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6539#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6540 union
6541 {
6542 Elf64_Xword map;
6543 unsigned char buf[sizeof (Elf64_Xword)];
6544 }
6545 rld_map;
6546#endif
6547#ifdef DT_MIPS_RLD_MAP
6548 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6549 {
6550 if (linux_read_memory (dyn->d_un.d_val,
6551 rld_map.buf, sizeof (rld_map.buf)) == 0)
6552 return rld_map.map;
6553 else
6554 break;
6555 }
6556#endif /* DT_MIPS_RLD_MAP */
6557#ifdef DT_MIPS_RLD_MAP_REL
6558 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6559 {
6560 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6561 rld_map.buf, sizeof (rld_map.buf)) == 0)
6562 return rld_map.map;
6563 else
6564 break;
6565 }
6566#endif /* DT_MIPS_RLD_MAP_REL */
6567
6568 if (dyn->d_tag == DT_DEBUG && map == -1)
6569 map = dyn->d_un.d_val;
6570
6571 if (dyn->d_tag == DT_NULL)
6572 break;
6573 }
6574 else
6575 {
6576 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6577#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6578 union
6579 {
6580 Elf32_Word map;
6581 unsigned char buf[sizeof (Elf32_Word)];
6582 }
6583 rld_map;
6584#endif
6585#ifdef DT_MIPS_RLD_MAP
6586 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6587 {
6588 if (linux_read_memory (dyn->d_un.d_val,
6589 rld_map.buf, sizeof (rld_map.buf)) == 0)
6590 return rld_map.map;
6591 else
6592 break;
6593 }
6594#endif /* DT_MIPS_RLD_MAP */
6595#ifdef DT_MIPS_RLD_MAP_REL
6596 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6597 {
6598 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6599 rld_map.buf, sizeof (rld_map.buf)) == 0)
6600 return rld_map.map;
6601 else
6602 break;
6603 }
6604#endif /* DT_MIPS_RLD_MAP_REL */
6605
6606 if (dyn->d_tag == DT_DEBUG && map == -1)
6607 map = dyn->d_un.d_val;
6608
6609 if (dyn->d_tag == DT_NULL)
6610 break;
6611 }
6612
6613 dynamic_memaddr += dyn_size;
6614 }
6615
6616 return map;
6617}
6618
6619/* Read one pointer from MEMADDR in the inferior. */
6620
6621static int
6622read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6623{
6624 int ret;
6625
6626 /* Go through a union so this works on either big or little endian
6627 hosts, when the inferior's pointer size is smaller than the size
6628 of CORE_ADDR. It is assumed that the inferior's endianness is the
6629 same as the superior's. */
6630 union
6631 {
6632 CORE_ADDR core_addr;
6633 unsigned int ui;
6634 unsigned char uc;
6635 } addr;
6636
6637 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6638 if (ret == 0)
6639 {
6640 if (ptr_size == sizeof (CORE_ADDR))
6641 *ptr = addr.core_addr;
6642 else if (ptr_size == sizeof (unsigned int))
6643 *ptr = addr.ui;
6644 else
6645 gdb_assert_not_reached ("unhandled pointer size");
6646 }
6647 return ret;
6648}
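
/* Example of why the union above is needed: reading a 4-byte inferior
   pointer fills the first four bytes of the union, so on a little-endian
   host ADDR.UI holds the value, while reading the same bytes through
   ADDR.CORE_ADDR (say, 8 bytes wide) would mix them with four bytes of
   garbage; on a big-endian host the value would land in the wrong half
   entirely.  Hence the dispatch on PTR_SIZE.  */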
6649
6650struct link_map_offsets
6651 {
6652 /* Offset and size of r_debug.r_version. */
6653 int r_version_offset;
6654
6655 /* Offset and size of r_debug.r_map. */
6656 int r_map_offset;
6657
6658 /* Offset to l_addr field in struct link_map. */
6659 int l_addr_offset;
6660
6661 /* Offset to l_name field in struct link_map. */
6662 int l_name_offset;
6663
6664 /* Offset to l_ld field in struct link_map. */
6665 int l_ld_offset;
6666
6667 /* Offset to l_next field in struct link_map. */
6668 int l_next_offset;
6669
6670 /* Offset to l_prev field in struct link_map. */
6671 int l_prev_offset;
6672 };
6673
6674/* Construct qXfer:libraries-svr4:read reply. */
6675
6676static int
6677linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6678 unsigned const char *writebuf,
6679 CORE_ADDR offset, int len)
6680{
6681 char *document;
6682 unsigned document_len;
6683 struct process_info_private *const priv = current_process ()->priv;
6684 char filename[PATH_MAX];
6685 int pid, is_elf64;
6686
6687 static const struct link_map_offsets lmo_32bit_offsets =
6688 {
6689 0, /* r_version offset. */
6690 4, /* r_debug.r_map offset. */
6691 0, /* l_addr offset in link_map. */
6692 4, /* l_name offset in link_map. */
6693 8, /* l_ld offset in link_map. */
6694 12, /* l_next offset in link_map. */
6695 16 /* l_prev offset in link_map. */
6696 };
6697
6698 static const struct link_map_offsets lmo_64bit_offsets =
6699 {
6700 0, /* r_version offset. */
6701 8, /* r_debug.r_map offset. */
6702 0, /* l_addr offset in link_map. */
6703 8, /* l_name offset in link_map. */
6704 16, /* l_ld offset in link_map. */
6705 24, /* l_next offset in link_map. */
6706 32 /* l_prev offset in link_map. */
6707 };
6708 const struct link_map_offsets *lmo;
6709 unsigned int machine;
6710 int ptr_size;
6711 CORE_ADDR lm_addr = 0, lm_prev = 0;
6712 int allocated = 1024;
6713 char *p;
6714 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6715 int header_done = 0;
6716
6717 if (writebuf != NULL)
6718 return -2;
6719 if (readbuf == NULL)
6720 return -1;
6721
6722 pid = lwpid_of (current_thread);
6723 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6724 is_elf64 = elf_64_file_p (filename, &machine);
6725 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6726 ptr_size = is_elf64 ? 8 : 4;
6727
6728 while (annex[0] != '\0')
6729 {
6730 const char *sep;
6731 CORE_ADDR *addrp;
6732 int len;
6733
6734 sep = strchr (annex, '=');
6735 if (sep == NULL)
6736 break;
6737
6738 len = sep - annex;
6739 if (len == 5 && startswith (annex, "start"))
6740 addrp = &lm_addr;
6741 else if (len == 4 && startswith (annex, "prev"))
6742 addrp = &lm_prev;
6743 else
6744 {
6745 annex = strchr (sep, ';');
6746 if (annex == NULL)
6747 break;
6748 annex++;
6749 continue;
6750 }
6751
6752 annex = decode_address_to_semicolon (addrp, sep + 1);
6753 }
6754
6755 if (lm_addr == 0)
6756 {
6757 int r_version = 0;
6758
6759 if (priv->r_debug == 0)
6760 priv->r_debug = get_r_debug (pid, is_elf64);
6761
6762 /* We failed to find DT_DEBUG. This situation will not change
6763 for this inferior, so do not retry it. Report it to GDB as
6764 E01; see GDB's solib-svr4.c for the reasons. */
6765 if (priv->r_debug == (CORE_ADDR) -1)
6766 return -1;
6767
6768 if (priv->r_debug != 0)
6769 {
6770 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6771 (unsigned char *) &r_version,
6772 sizeof (r_version)) != 0
6773 || r_version != 1)
6774 {
6775 warning ("unexpected r_debug version %d", r_version);
6776 }
6777 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6778 &lm_addr, ptr_size) != 0)
6779 {
6780 warning ("unable to read r_map from 0x%lx",
6781 (long) priv->r_debug + lmo->r_map_offset);
6782 }
6783 }
6784 }
6785
6786 document = (char *) xmalloc (allocated);
6787 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6788 p = document + strlen (document);
6789
6790 while (lm_addr
6791 && read_one_ptr (lm_addr + lmo->l_name_offset,
6792 &l_name, ptr_size) == 0
6793 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6794 &l_addr, ptr_size) == 0
6795 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6796 &l_ld, ptr_size) == 0
6797 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6798 &l_prev, ptr_size) == 0
6799 && read_one_ptr (lm_addr + lmo->l_next_offset,
6800 &l_next, ptr_size) == 0)
6801 {
6802 unsigned char libname[PATH_MAX];
6803
6804 if (lm_prev != l_prev)
6805 {
6806 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6807 (long) lm_prev, (long) l_prev);
6808 break;
6809 }
6810
6811 /* Ignore the first entry even if it has a valid name, as the first
6812 entry corresponds to the main executable. The first entry should
6813 not be skipped if the dynamic loader was loaded late by a static
6814 executable (see the solib-svr4.c parameter ignore_first). But in
6815 that case the main executable does not have PT_DYNAMIC present and
6816 this function has already exited above due to a failed get_r_debug. */
6817 if (lm_prev == 0)
6818 {
6819 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6820 p = p + strlen (p);
6821 }
6822 else
6823 {
6824 /* Not checking for error because reading may stop before
6825 we've got PATH_MAX worth of characters. */
6826 libname[0] = '\0';
6827 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6828 libname[sizeof (libname) - 1] = '\0';
6829 if (libname[0] != '\0')
6830 {
6831 /* 6x the size for xml_escape_text below. */
6832 size_t len = 6 * strlen ((char *) libname);
6833 char *name;
6834
6835 if (!header_done)
6836 {
6837 /* Terminate `<library-list-svr4'. */
6838 *p++ = '>';
6839 header_done = 1;
6840 }
6841
6842 while (allocated < p - document + len + 200)
6843 {
6844 /* Expand to guarantee sufficient storage. */
6845 uintptr_t document_len = p - document;
6846
6847 document = (char *) xrealloc (document, 2 * allocated);
6848 allocated *= 2;
6849 p = document + document_len;
6850 }
6851
6852 name = xml_escape_text ((char *) libname);
6853 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6854 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6855 name, (unsigned long) lm_addr,
6856 (unsigned long) l_addr, (unsigned long) l_ld);
6857 free (name);
6858 }
6859 }
6860
6861 lm_prev = lm_addr;
6862 lm_addr = l_next;
6863 }
6864
6865 if (!header_done)
6866 {
6867 /* Empty list; terminate `<library-list-svr4'. */
6868 strcpy (p, "/>");
6869 }
6870 else
6871 strcpy (p, "</library-list-svr4>");
6872
6873 document_len = strlen (document);
6874 if (offset < document_len)
6875 document_len -= offset;
6876 else
6877 document_len = 0;
6878 if (len > document_len)
6879 len = document_len;
6880
6881 memcpy (readbuf, document + offset, len);
6882 xfree (document);
6883
6884 return len;
6885}
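
/* For reference, a reply document built above looks like this (the
   addresses are illustrative):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe700"
            l_addr="0x7ffff7a0e000" l_ld="0x7ffff7dd2b80"/>
   </library-list-svr4>  */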
6886
6887#ifdef HAVE_LINUX_BTRACE
6888
6889/* See to_disable_btrace target method. */
6890
6891static int
6892linux_low_disable_btrace (struct btrace_target_info *tinfo)
6893{
6894 enum btrace_error err;
6895
6896 err = linux_disable_btrace (tinfo);
6897 return (err == BTRACE_ERR_NONE ? 0 : -1);
6898}
6899
6900/* Encode an Intel(R) Processor Trace configuration. */
6901
6902static void
6903linux_low_encode_pt_config (struct buffer *buffer,
6904 const struct btrace_data_pt_config *config)
6905{
6906 buffer_grow_str (buffer, "<pt-config>\n");
6907
6908 switch (config->cpu.vendor)
6909 {
6910 case CV_INTEL:
6911 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6912 "model=\"%u\" stepping=\"%u\"/>\n",
6913 config->cpu.family, config->cpu.model,
6914 config->cpu.stepping);
6915 break;
6916
6917 default:
6918 break;
6919 }
6920
6921 buffer_grow_str (buffer, "</pt-config>\n");
6922}
6923
6924/* Encode a raw buffer. */
6925
6926static void
6927linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6928 unsigned int size)
6929{
6930 if (size == 0)
6931 return;
6932
6933 /* We use hex encoding - see common/rsp-low.h. */
6934 buffer_grow_str (buffer, "<raw>\n");
6935
6936 while (size-- > 0)
6937 {
6938 char elem[2];
6939
6940 elem[0] = tohex ((*data >> 4) & 0xf);
6941 elem[1] = tohex (*data++ & 0xf);
6942
6943 buffer_grow (buffer, elem, 2);
6944 }
6945
6946 buffer_grow_str (buffer, "</raw>\n");
6947}
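
/* Example: the byte 0xde is emitted as the two characters "de", so a
   4-byte buffer { 0xde, 0xad, 0xbe, 0xef } is encoded as
   "<raw>\ndeadbeef</raw>\n".  */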
6948
6949/* See to_read_btrace target method. */
6950
6951static int
6952linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6953 enum btrace_read_type type)
6954{
6955 struct btrace_data btrace;
6956 struct btrace_block *block;
6957 enum btrace_error err;
6958 int i;
6959
6960 btrace_data_init (&btrace);
6961
6962 err = linux_read_btrace (&btrace, tinfo, type);
6963 if (err != BTRACE_ERR_NONE)
6964 {
6965 if (err == BTRACE_ERR_OVERFLOW)
6966 buffer_grow_str0 (buffer, "E.Overflow.");
6967 else
6968 buffer_grow_str0 (buffer, "E.Generic Error.");
6969
6970 goto err;
6971 }
6972
6973 switch (btrace.format)
6974 {
6975 case BTRACE_FORMAT_NONE:
6976 buffer_grow_str0 (buffer, "E.No Trace.");
6977 goto err;
6978
6979 case BTRACE_FORMAT_BTS:
6980 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6981 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6982
6983 for (i = 0;
6984 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6985 i++)
6986 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6987 paddress (block->begin), paddress (block->end));
6988
6989 buffer_grow_str0 (buffer, "</btrace>\n");
6990 break;
6991
6992 case BTRACE_FORMAT_PT:
6993 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6994 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6995 buffer_grow_str (buffer, "<pt>\n");
6996
6997 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6998
6999 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7000 btrace.variant.pt.size);
7001
7002 buffer_grow_str (buffer, "</pt>\n");
7003 buffer_grow_str0 (buffer, "</btrace>\n");
7004 break;
7005
7006 default:
7007 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7008 goto err;
7009 }
7010
7011 btrace_data_fini (&btrace);
7012 return 0;
7013
7014err:
7015 btrace_data_fini (&btrace);
7016 return -1;
7017}
7018
7019/* See to_btrace_conf target method. */
7020
7021static int
7022linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7023 struct buffer *buffer)
7024{
7025 const struct btrace_config *conf;
7026
7027 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7028 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7029
7030 conf = linux_btrace_conf (tinfo);
7031 if (conf != NULL)
7032 {
7033 switch (conf->format)
7034 {
7035 case BTRACE_FORMAT_NONE:
7036 break;
7037
7038 case BTRACE_FORMAT_BTS:
7039 buffer_xml_printf (buffer, "<bts");
7040 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7041 buffer_xml_printf (buffer, " />\n");
7042 break;
7043
7044 case BTRACE_FORMAT_PT:
7045 buffer_xml_printf (buffer, "<pt");
7046 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7047 buffer_xml_printf (buffer, "/>\n");
7048 break;
7049 }
7050 }
7051
7052 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7053 return 0;
7054}
7055#endif /* HAVE_LINUX_BTRACE */
7056
7057/* See nat/linux-nat.h. */
7058
7059ptid_t
7060current_lwp_ptid (void)
7061{
7062 return ptid_of (current_thread);
7063}
7064
7065/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7066
7067static int
7068linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7069{
7070 if (the_low_target.breakpoint_kind_from_pc != NULL)
7071 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7072 else
7073 return default_breakpoint_kind_from_pc (pcptr);
7074}
7075
7076/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7077
7078static const gdb_byte *
7079linux_sw_breakpoint_from_kind (int kind, int *size)
7080{
7081 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7082
7083 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7084}
7085
7086/* Implementation of the target_ops method
7087 "breakpoint_kind_from_current_state". */
7088
7089static int
7090linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7091{
7092 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7093 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7094 else
7095 return linux_breakpoint_kind_from_pc (pcptr);
7096}
7097
7098static struct target_ops linux_target_ops = {
7099 linux_create_inferior,
7100 linux_post_create_inferior,
7101 linux_attach,
7102 linux_kill,
7103 linux_detach,
7104 linux_mourn,
7105 linux_join,
7106 linux_thread_alive,
7107 linux_resume,
7108 linux_wait,
7109 linux_fetch_registers,
7110 linux_store_registers,
7111 linux_prepare_to_access_memory,
7112 linux_done_accessing_memory,
7113 linux_read_memory,
7114 linux_write_memory,
7115 linux_look_up_symbols,
7116 linux_request_interrupt,
7117 linux_read_auxv,
7118 linux_supports_z_point_type,
7119 linux_insert_point,
7120 linux_remove_point,
7121 linux_stopped_by_sw_breakpoint,
7122 linux_supports_stopped_by_sw_breakpoint,
7123 linux_stopped_by_hw_breakpoint,
7124 linux_supports_stopped_by_hw_breakpoint,
7125 linux_supports_hardware_single_step,
7126 linux_stopped_by_watchpoint,
7127 linux_stopped_data_address,
7128#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7129 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7130 && defined(PT_TEXT_END_ADDR)
7131 linux_read_offsets,
7132#else
7133 NULL,
7134#endif
7135#ifdef USE_THREAD_DB
7136 thread_db_get_tls_address,
7137#else
7138 NULL,
7139#endif
7140 linux_qxfer_spu,
7141 hostio_last_error_from_errno,
7142 linux_qxfer_osdata,
7143 linux_xfer_siginfo,
7144 linux_supports_non_stop,
7145 linux_async,
7146 linux_start_non_stop,
7147 linux_supports_multi_process,
7148 linux_supports_fork_events,
7149 linux_supports_vfork_events,
7150 linux_supports_exec_events,
7151 linux_handle_new_gdb_connection,
7152#ifdef USE_THREAD_DB
7153 thread_db_handle_monitor_command,
7154#else
7155 NULL,
7156#endif
7157 linux_common_core_of_thread,
7158 linux_read_loadmap,
7159 linux_process_qsupported,
7160 linux_supports_tracepoints,
7161 linux_read_pc,
7162 linux_write_pc,
7163 linux_thread_stopped,
7164 NULL,
7165 linux_pause_all,
7166 linux_unpause_all,
7167 linux_stabilize_threads,
7168 linux_install_fast_tracepoint_jump_pad,
7169 linux_emit_ops,
7170 linux_supports_disable_randomization,
7171 linux_get_min_fast_tracepoint_insn_len,
7172 linux_qxfer_libraries_svr4,
7173 linux_supports_agent,
7174#ifdef HAVE_LINUX_BTRACE
7175 linux_supports_btrace,
7176 linux_enable_btrace,
7177 linux_low_disable_btrace,
7178 linux_low_read_btrace,
7179 linux_low_btrace_conf,
7180#else
7181 NULL,
7182 NULL,
7183 NULL,
7184 NULL,
7185 NULL,
7186#endif
7187 linux_supports_range_stepping,
7188 linux_proc_pid_to_exec_file,
7189 linux_mntns_open_cloexec,
7190 linux_mntns_unlink,
7191 linux_mntns_readlink,
7192 linux_breakpoint_kind_from_pc,
7193 linux_sw_breakpoint_from_kind,
7194 linux_proc_tid_get_name,
7195 linux_breakpoint_kind_from_current_state,
7196 linux_supports_software_single_step
7197};
7198
7199static void
7200linux_init_signals (void)
7201{
7202 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
7203 to find what the cancel signal actually is. */
7204#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
7205 signal (__SIGRTMIN+1, SIG_IGN);
7206#endif
7207}
7208
7209#ifdef HAVE_LINUX_REGSETS
7210void
7211initialize_regsets_info (struct regsets_info *info)
7212{
7213 for (info->num_regsets = 0;
7214 info->regsets[info->num_regsets].size >= 0;
7215 info->num_regsets++)
7216 ;
7217}
7218#endif
7219
7220void
7221initialize_low (void)
7222{
7223 struct sigaction sigchld_action;
7224
7225 memset (&sigchld_action, 0, sizeof (sigchld_action));
7226 set_target_ops (&linux_target_ops);
7227
7228 linux_init_signals ();
7229 linux_ptrace_init_warnings ();
7230
7231 sigchld_action.sa_handler = sigchld_handler;
7232 sigemptyset (&sigchld_action.sa_mask);
7233 sigchld_action.sa_flags = SA_RESTART;
7234 sigaction (SIGCHLD, &sigchld_action, NULL);
7235
7236 initialize_low_arch ();
7237
7238 linux_check_ptrace_features ();
7239}