1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
29 #include <sys/ptrace.h>
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
34 #include <sys/ioctl.h>
37 #include <sys/syscall.h>
41 #include <sys/types.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
56 #include "nat/linux-namespaces.h"
59 #define SPUFS_MAGIC 0x23c9b64e
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
77 /* This is the kernel's hard limit. Not to be confused with
83 /* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
107 #ifdef HAVE_LINUX_BTRACE
108 # include "nat/linux-btrace.h"
109 # include "btrace-common.h"
112 #ifndef HAVE_ELF32_AUXV_T
113 /* Copied from glibc's elf.h. */
116 uint32_t a_type
; /* Entry type */
119 uint32_t a_val
; /* Integer value */
120 /* We use to have pointer elements added here. We cannot do that,
121 though, since it does not work when using 32-bit definitions
122 on 64-bit platforms and vice versa. */
127 #ifndef HAVE_ELF64_AUXV_T
128 /* Copied from glibc's elf.h. */
131 uint64_t a_type
; /* Entry type */
134 uint64_t a_val
; /* Integer value */
135 /* We use to have pointer elements added here. We cannot do that,
136 though, since it does not work when using 32-bit definitions
137 on 64-bit platforms and vice versa. */
144 /* See nat/linux-nat.h. */
147 ptid_of_lwp (struct lwp_info
*lwp
)
149 return ptid_of (get_lwp_thread (lwp
));
152 /* See nat/linux-nat.h. */
155 lwp_set_arch_private_info (struct lwp_info
*lwp
,
156 struct arch_lwp_info
*info
)
158 lwp
->arch_private
= info
;
161 /* See nat/linux-nat.h. */
163 struct arch_lwp_info
*
164 lwp_arch_private_info (struct lwp_info
*lwp
)
166 return lwp
->arch_private
;
169 /* See nat/linux-nat.h. */
172 lwp_is_stopped (struct lwp_info
*lwp
)
177 /* See nat/linux-nat.h. */
179 enum target_stop_reason
180 lwp_stop_reason (struct lwp_info
*lwp
)
182 return lwp
->stop_reason
;
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};

/* Head of the list of stopped-but-unclaimed PIDs.  */
struct simple_pid_list *stopped_pids;
202 /* Trivial list manipulation functions to keep track of a list of new
203 stopped processes. */
206 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
208 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
211 new_pid
->status
= status
;
212 new_pid
->next
= *listp
;
217 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
219 struct simple_pid_list
**p
;
221 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
222 if ((*p
)->pid
== pid
)
224 struct simple_pid_list
*next
= (*p
)->next
;
226 *statusp
= (*p
)->status
;
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
249 /* FIXME make into a target method? */
250 int using_threads
= 1;
252 /* True if we're presently stabilizing threads (moving them out of
254 static int stabilizing_threads
;
256 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
257 int step
, int signal
, siginfo_t
*info
);
258 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
259 static void stop_all_lwps (int suspend
, struct lwp_info
*except
);
260 static void unstop_all_lwps (int unsuspend
, struct lwp_info
*except
);
261 static int linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
262 int *wstat
, int options
);
263 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
264 static struct lwp_info
*add_lwp (ptid_t ptid
);
265 static int linux_stopped_by_watchpoint (void);
266 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
267 static void proceed_all_lwps (void);
268 static int finish_step_over (struct lwp_info
*lwp
);
269 static int kill_lwp (unsigned long lwpid
, int signo
);
271 /* When the event-loop is doing a step-over, this points at the thread
273 ptid_t step_over_bkpt
;
275 /* True if the low target can hardware single-step. Such targets
276 don't need a BREAKPOINT_REINSERT_ADDR callback. */
279 can_hardware_single_step (void)
281 return (the_low_target
.breakpoint_reinsert_addr
== NULL
);
284 /* True if the low target supports memory breakpoints. If so, we'll
285 have a GET_PC implementation. */
288 supports_breakpoints (void)
290 return (the_low_target
.get_pc
!= NULL
);
293 /* Returns true if this target can support fast tracepoints. This
294 does not mean that the in-process agent has been loaded in the
298 supports_fast_tracepoints (void)
300 return the_low_target
.install_fast_tracepoint_jump_pad
!= NULL
;
303 /* True if LWP is stopped in its stepping range. */
306 lwp_in_step_range (struct lwp_info
*lwp
)
308 CORE_ADDR pc
= lwp
->stop_pc
;
310 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
313 struct pending_signals
317 struct pending_signals
*prev
;
320 /* The read/write ends of the pipe registered as waitable file in the
322 static int linux_event_pipe
[2] = { -1, -1 };
324 /* True if we're currently in async mode. */
325 #define target_is_async_p() (linux_event_pipe[0] != -1)
327 static void send_sigstop (struct lwp_info
*lwp
);
328 static void wait_for_sigstop (void);
/* Return non-zero if HEADER is a 64-bit ELF file.  On a valid ELF
   magic, also store the e_machine value in *MACHINE; otherwise store
   EM_NONE and return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      /* Short read or error: not a usable ELF header.  Don't leak
	 the descriptor.  */
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
372 /* Accepts an integer PID; Returns true if the executable PID is
373 running is a 64-bit ELF file.. */
376 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
380 sprintf (file
, "/proc/%d/exe", pid
);
381 return elf_64_file_p (file
, machine
);
385 delete_lwp (struct lwp_info
*lwp
)
387 struct thread_info
*thr
= get_lwp_thread (lwp
);
390 debug_printf ("deleting %ld\n", lwpid_of (thr
));
393 free (lwp
->arch_private
);
397 /* Add a process to the common process list, and set its private
400 static struct process_info
*
401 linux_add_process (int pid
, int attached
)
403 struct process_info
*proc
;
405 proc
= add_process (pid
, attached
);
406 proc
->priv
= xcalloc (1, sizeof (*proc
->priv
));
408 /* Set the arch when the first LWP stops. */
409 proc
->priv
->new_inferior
= 1;
411 if (the_low_target
.new_process
!= NULL
)
412 proc
->priv
->arch_private
= the_low_target
.new_process ();
417 static CORE_ADDR
get_pc (struct lwp_info
*lwp
);
419 /* Handle a GNU/Linux extended wait response. If we see a clone
420 event, we need to add the new LWP to our list (and return 0 so as
421 not to report the trap to higher layers). */
424 handle_extended_wait (struct lwp_info
*event_lwp
, int wstat
)
426 int event
= linux_ptrace_get_extended_event (wstat
);
427 struct thread_info
*event_thr
= get_lwp_thread (event_lwp
);
428 struct lwp_info
*new_lwp
;
430 if ((event
== PTRACE_EVENT_FORK
) || (event
== PTRACE_EVENT_VFORK
)
431 || (event
== PTRACE_EVENT_CLONE
))
434 unsigned long new_pid
;
437 /* Get the pid of the new lwp. */
438 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
441 /* If we haven't already seen the new PID stop, wait for it now. */
442 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
444 /* The new child has a pending SIGSTOP. We can't affect it until it
445 hits the SIGSTOP, but we're already attached. */
447 ret
= my_waitpid (new_pid
, &status
, __WALL
);
450 perror_with_name ("waiting for new child");
451 else if (ret
!= new_pid
)
452 warning ("wait returned unexpected PID %d", ret
);
453 else if (!WIFSTOPPED (status
))
454 warning ("wait returned unexpected status 0x%x", status
);
457 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
459 struct process_info
*parent_proc
;
460 struct process_info
*child_proc
;
461 struct lwp_info
*child_lwp
;
462 struct thread_info
*child_thr
;
463 struct target_desc
*tdesc
;
465 ptid
= ptid_build (new_pid
, new_pid
, 0);
469 debug_printf ("HEW: Got fork event from LWP %ld, "
471 ptid_get_lwp (ptid_of (event_thr
)),
472 ptid_get_pid (ptid
));
475 /* Add the new process to the tables and clone the breakpoint
476 lists of the parent. We need to do this even if the new process
477 will be detached, since we will need the process object and the
478 breakpoints to remove any breakpoints from memory when we
479 detach, and the client side will access registers. */
480 child_proc
= linux_add_process (new_pid
, 0);
481 gdb_assert (child_proc
!= NULL
);
482 child_lwp
= add_lwp (ptid
);
483 gdb_assert (child_lwp
!= NULL
);
484 child_lwp
->stopped
= 1;
485 child_lwp
->must_set_ptrace_flags
= 1;
486 child_lwp
->status_pending_p
= 0;
487 child_thr
= get_lwp_thread (child_lwp
);
488 child_thr
->last_resume_kind
= resume_stop
;
489 parent_proc
= get_thread_process (event_thr
);
490 child_proc
->attached
= parent_proc
->attached
;
491 clone_all_breakpoints (&child_proc
->breakpoints
,
492 &child_proc
->raw_breakpoints
,
493 parent_proc
->breakpoints
);
495 tdesc
= xmalloc (sizeof (struct target_desc
));
496 copy_target_description (tdesc
, parent_proc
->tdesc
);
497 child_proc
->tdesc
= tdesc
;
499 /* Clone arch-specific process data. */
500 if (the_low_target
.new_fork
!= NULL
)
501 the_low_target
.new_fork (parent_proc
, child_proc
);
503 /* Save fork info in the parent thread. */
504 if (event
== PTRACE_EVENT_FORK
)
505 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_FORKED
;
506 else if (event
== PTRACE_EVENT_VFORK
)
507 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORKED
;
509 event_lwp
->waitstatus
.value
.related_pid
= ptid
;
511 /* The status_pending field contains bits denoting the
512 extended event, so when the pending event is handled,
513 the handler will look at lwp->waitstatus. */
514 event_lwp
->status_pending_p
= 1;
515 event_lwp
->status_pending
= wstat
;
517 /* Report the event. */
522 debug_printf ("HEW: Got clone event "
523 "from LWP %ld, new child is LWP %ld\n",
524 lwpid_of (event_thr
), new_pid
);
526 ptid
= ptid_build (pid_of (event_thr
), new_pid
, 0);
527 new_lwp
= add_lwp (ptid
);
529 /* Either we're going to immediately resume the new thread
530 or leave it stopped. linux_resume_one_lwp is a nop if it
531 thinks the thread is currently running, so set this first
532 before calling linux_resume_one_lwp. */
533 new_lwp
->stopped
= 1;
535 /* If we're suspending all threads, leave this one suspended
537 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
)
538 new_lwp
->suspended
= 1;
540 /* Normally we will get the pending SIGSTOP. But in some cases
541 we might get another signal delivered to the group first.
542 If we do get another signal, be sure not to lose it. */
543 if (WSTOPSIG (status
) != SIGSTOP
)
545 new_lwp
->stop_expected
= 1;
546 new_lwp
->status_pending_p
= 1;
547 new_lwp
->status_pending
= status
;
550 /* Don't report the event. */
553 else if (event
== PTRACE_EVENT_VFORK_DONE
)
555 event_lwp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
557 /* Report the event. */
561 internal_error (__FILE__
, __LINE__
, _("unknown ptrace event %d"), event
);
564 /* Return the PC as read from the regcache of LWP, without any
568 get_pc (struct lwp_info
*lwp
)
570 struct thread_info
*saved_thread
;
571 struct regcache
*regcache
;
574 if (the_low_target
.get_pc
== NULL
)
577 saved_thread
= current_thread
;
578 current_thread
= get_lwp_thread (lwp
);
580 regcache
= get_thread_regcache (current_thread
, 1);
581 pc
= (*the_low_target
.get_pc
) (regcache
);
584 debug_printf ("pc is 0x%lx\n", (long) pc
);
586 current_thread
= saved_thread
;
590 /* This function should only be called if LWP got a SIGTRAP.
591 The SIGTRAP could mean several things.
593 On i386, where decr_pc_after_break is non-zero:
595 If we were single-stepping this process using PTRACE_SINGLESTEP, we
596 will get only the one SIGTRAP. The value of $eip will be the next
597 instruction. If the instruction we stepped over was a breakpoint,
598 we need to decrement the PC.
600 If we continue the process using PTRACE_CONT, we will get a
601 SIGTRAP when we hit a breakpoint. The value of $eip will be
602 the instruction after the breakpoint (i.e. needs to be
603 decremented). If we report the SIGTRAP to GDB, we must also
604 report the undecremented PC. If the breakpoint is removed, we
605 must resume at the decremented PC.
607 On a non-decr_pc_after_break machine with hardware or kernel
610 If we either single-step a breakpoint instruction, or continue and
611 hit a breakpoint instruction, our PC will point at the breakpoint
615 check_stopped_by_breakpoint (struct lwp_info
*lwp
)
618 CORE_ADDR sw_breakpoint_pc
;
619 struct thread_info
*saved_thread
;
620 #if USE_SIGTRAP_SIGINFO
624 if (the_low_target
.get_pc
== NULL
)
628 sw_breakpoint_pc
= pc
- the_low_target
.decr_pc_after_break
;
630 /* breakpoint_at reads from the current thread. */
631 saved_thread
= current_thread
;
632 current_thread
= get_lwp_thread (lwp
);
634 #if USE_SIGTRAP_SIGINFO
635 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
636 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
638 if (siginfo
.si_signo
== SIGTRAP
)
640 if (siginfo
.si_code
== GDB_ARCH_TRAP_BRKPT
)
644 struct thread_info
*thr
= get_lwp_thread (lwp
);
646 debug_printf ("CSBB: %s stopped by software breakpoint\n",
647 target_pid_to_str (ptid_of (thr
)));
650 /* Back up the PC if necessary. */
651 if (pc
!= sw_breakpoint_pc
)
653 struct regcache
*regcache
654 = get_thread_regcache (current_thread
, 1);
655 (*the_low_target
.set_pc
) (regcache
, sw_breakpoint_pc
);
658 lwp
->stop_pc
= sw_breakpoint_pc
;
659 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
660 current_thread
= saved_thread
;
663 else if (siginfo
.si_code
== TRAP_HWBKPT
)
667 struct thread_info
*thr
= get_lwp_thread (lwp
);
669 debug_printf ("CSBB: %s stopped by hardware "
670 "breakpoint/watchpoint\n",
671 target_pid_to_str (ptid_of (thr
)));
675 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
676 current_thread
= saved_thread
;
679 else if (siginfo
.si_code
== TRAP_TRACE
)
683 struct thread_info
*thr
= get_lwp_thread (lwp
);
685 debug_printf ("CSBB: %s stopped by trace\n",
686 target_pid_to_str (ptid_of (thr
)));
692 /* We may have just stepped a breakpoint instruction. E.g., in
693 non-stop mode, GDB first tells the thread A to step a range, and
694 then the user inserts a breakpoint inside the range. In that
695 case we need to report the breakpoint PC. */
696 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
697 && (*the_low_target
.breakpoint_at
) (sw_breakpoint_pc
))
701 struct thread_info
*thr
= get_lwp_thread (lwp
);
703 debug_printf ("CSBB: %s stopped by software breakpoint\n",
704 target_pid_to_str (ptid_of (thr
)));
707 /* Back up the PC if necessary. */
708 if (pc
!= sw_breakpoint_pc
)
710 struct regcache
*regcache
711 = get_thread_regcache (current_thread
, 1);
712 (*the_low_target
.set_pc
) (regcache
, sw_breakpoint_pc
);
715 lwp
->stop_pc
= sw_breakpoint_pc
;
716 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
717 current_thread
= saved_thread
;
721 if (hardware_breakpoint_inserted_here (pc
))
725 struct thread_info
*thr
= get_lwp_thread (lwp
);
727 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
728 target_pid_to_str (ptid_of (thr
)));
732 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
733 current_thread
= saved_thread
;
738 current_thread
= saved_thread
;
742 static struct lwp_info
*
743 add_lwp (ptid_t ptid
)
745 struct lwp_info
*lwp
;
747 lwp
= (struct lwp_info
*) xmalloc (sizeof (*lwp
));
748 memset (lwp
, 0, sizeof (*lwp
));
750 if (the_low_target
.new_thread
!= NULL
)
751 the_low_target
.new_thread (lwp
);
753 lwp
->thread
= add_thread (ptid
, lwp
);
758 /* Start an inferior process and returns its pid.
759 ALLARGS is a vector of program-name and args. */
762 linux_create_inferior (char *program
, char **allargs
)
764 struct lwp_info
*new_lwp
;
767 struct cleanup
*restore_personality
768 = maybe_disable_address_space_randomization (disable_randomization
);
770 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
776 perror_with_name ("fork");
781 ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
783 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
784 signal (__SIGRTMIN
+ 1, SIG_DFL
);
789 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
790 stdout to stderr so that inferior i/o doesn't corrupt the connection.
791 Also, redirect stdin to /dev/null. */
792 if (remote_connection_is_stdio ())
795 open ("/dev/null", O_RDONLY
);
797 if (write (2, "stdin/stdout redirected\n",
798 sizeof ("stdin/stdout redirected\n") - 1) < 0)
800 /* Errors ignored. */;
804 execv (program
, allargs
);
806 execvp (program
, allargs
);
808 fprintf (stderr
, "Cannot exec %s: %s.\n", program
,
814 do_cleanups (restore_personality
);
816 linux_add_process (pid
, 0);
818 ptid
= ptid_build (pid
, pid
, 0);
819 new_lwp
= add_lwp (ptid
);
820 new_lwp
->must_set_ptrace_flags
= 1;
825 /* Attach to an inferior process. Returns 0 on success, ERRNO on
829 linux_attach_lwp (ptid_t ptid
)
831 struct lwp_info
*new_lwp
;
832 int lwpid
= ptid_get_lwp (ptid
);
834 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
838 new_lwp
= add_lwp (ptid
);
840 /* We need to wait for SIGSTOP before being able to make the next
841 ptrace call on this LWP. */
842 new_lwp
->must_set_ptrace_flags
= 1;
844 if (linux_proc_pid_is_stopped (lwpid
))
847 debug_printf ("Attached to a stopped process\n");
849 /* The process is definitely stopped. It is in a job control
850 stop, unless the kernel predates the TASK_STOPPED /
851 TASK_TRACED distinction, in which case it might be in a
852 ptrace stop. Make sure it is in a ptrace stop; from there we
853 can kill it, signal it, et cetera.
855 First make sure there is a pending SIGSTOP. Since we are
856 already attached, the process can not transition from stopped
857 to running without a PTRACE_CONT; so we know this signal will
858 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
859 probably already in the queue (unless this kernel is old
860 enough to use TASK_STOPPED for ptrace stops); but since
861 SIGSTOP is not an RT signal, it can only be queued once. */
862 kill_lwp (lwpid
, SIGSTOP
);
864 /* Finally, resume the stopped process. This will deliver the
865 SIGSTOP (or a higher priority signal, just like normal
866 PTRACE_ATTACH), which we'll catch later on. */
867 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
870 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
873 There are several cases to consider here:
875 1) gdbserver has already attached to the process and is being notified
876 of a new thread that is being created.
877 In this case we should ignore that SIGSTOP and resume the
878 process. This is handled below by setting stop_expected = 1,
879 and the fact that add_thread sets last_resume_kind ==
882 2) This is the first thread (the process thread), and we're attaching
883 to it via attach_inferior.
884 In this case we want the process thread to stop.
885 This is handled by having linux_attach set last_resume_kind ==
886 resume_stop after we return.
888 If the pid we are attaching to is also the tgid, we attach to and
889 stop all the existing threads. Otherwise, we attach to pid and
890 ignore any other threads in the same group as this pid.
892 3) GDB is connecting to gdbserver and is requesting an enumeration of all
894 In this case we want the thread to stop.
895 FIXME: This case is currently not properly handled.
896 We should wait for the SIGSTOP but don't. Things work apparently
897 because enough time passes between when we ptrace (ATTACH) and when
898 gdb makes the next ptrace call on the thread.
900 On the other hand, if we are currently trying to stop all threads, we
901 should treat the new thread as if we had sent it a SIGSTOP. This works
902 because we are guaranteed that the add_lwp call above added us to the
903 end of the list, and so the new thread has not yet reached
904 wait_for_sigstop (but will). */
905 new_lwp
->stop_expected
= 1;
910 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
911 already attached. Returns true if a new LWP is found, false
915 attach_proc_task_lwp_callback (ptid_t ptid
)
917 /* Is this a new thread? */
918 if (find_thread_ptid (ptid
) == NULL
)
920 int lwpid
= ptid_get_lwp (ptid
);
924 debug_printf ("Found new lwp %d\n", lwpid
);
926 err
= linux_attach_lwp (ptid
);
928 /* Be quiet if we simply raced with the thread exiting. EPERM
929 is returned if the thread's task still exists, and is marked
930 as exited or zombie, as well as other conditions, so in that
931 case, confirm the status in /proc/PID/status. */
933 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
937 debug_printf ("Cannot attach to lwp %d: "
938 "thread is gone (%d: %s)\n",
939 lwpid
, err
, strerror (err
));
944 warning (_("Cannot attach to lwp %d: %s"),
946 linux_ptrace_attach_fail_reason_string (ptid
, err
));
954 /* Attach to PID. If PID is the tgid, attach to it and all
958 linux_attach (unsigned long pid
)
960 ptid_t ptid
= ptid_build (pid
, pid
, 0);
963 /* Attach to PID. We will check for other threads
965 err
= linux_attach_lwp (ptid
);
967 error ("Cannot attach to process %ld: %s",
968 pid
, linux_ptrace_attach_fail_reason_string (ptid
, err
));
970 linux_add_process (pid
, 1);
974 struct thread_info
*thread
;
976 /* Don't ignore the initial SIGSTOP if we just attached to this
977 process. It will be collected by wait shortly. */
978 thread
= find_thread_ptid (ptid_build (pid
, pid
, 0));
979 thread
->last_resume_kind
= resume_stop
;
982 /* We must attach to every LWP. If /proc is mounted, use that to
983 find them now. On the one hand, the inferior may be using raw
984 clone instead of using pthreads. On the other hand, even if it
985 is using pthreads, GDB may not be connected yet (thread_db needs
986 to do symbol lookups, through qSymbol). Also, thread_db walks
987 structures in the inferior's address space to find the list of
988 threads/LWPs, and those structures may well be corrupted. Note
989 that once thread_db is loaded, we'll still use it to list threads
990 and associate pthread info with each LWP. */
991 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
1002 second_thread_of_pid_p (struct inferior_list_entry
*entry
, void *args
)
1004 struct counter
*counter
= args
;
1006 if (ptid_get_pid (entry
->id
) == counter
->pid
)
1008 if (++counter
->count
> 1)
1016 last_thread_of_process_p (int pid
)
1018 struct counter counter
= { pid
, 0 };
1020 return (find_inferior (&all_threads
,
1021 second_thread_of_pid_p
, &counter
) == NULL
);
1027 linux_kill_one_lwp (struct lwp_info
*lwp
)
1029 struct thread_info
*thr
= get_lwp_thread (lwp
);
1030 int pid
= lwpid_of (thr
);
1032 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1033 there is no signal context, and ptrace(PTRACE_KILL) (or
1034 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1035 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1036 alternative is to kill with SIGKILL. We only need one SIGKILL
1037 per process, not one for each thread. But since we still support
1038 linuxthreads, and we also support debugging programs using raw
1039 clone without CLONE_THREAD, we send one for each thread. For
1040 years, we used PTRACE_KILL only, so we're being a bit paranoid
1041 about some old kernels where PTRACE_KILL might work better
1042 (dubious if there are any such, but that's why it's paranoia), so
1043 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1047 kill_lwp (pid
, SIGKILL
);
1050 int save_errno
= errno
;
1052 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1053 target_pid_to_str (ptid_of (thr
)),
1054 save_errno
? strerror (save_errno
) : "OK");
1058 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
1061 int save_errno
= errno
;
1063 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1064 target_pid_to_str (ptid_of (thr
)),
1065 save_errno
? strerror (save_errno
) : "OK");
1069 /* Kill LWP and wait for it to die. */
1072 kill_wait_lwp (struct lwp_info
*lwp
)
1074 struct thread_info
*thr
= get_lwp_thread (lwp
);
1075 int pid
= ptid_get_pid (ptid_of (thr
));
1076 int lwpid
= ptid_get_lwp (ptid_of (thr
));
1081 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
1085 linux_kill_one_lwp (lwp
);
1087 /* Make sure it died. Notes:
1089 - The loop is most likely unnecessary.
1091 - We don't use linux_wait_for_event as that could delete lwps
1092 while we're iterating over them. We're not interested in
1093 any pending status at this point, only in making sure all
1094 wait status on the kernel side are collected until the
1097 - We don't use __WALL here as the __WALL emulation relies on
1098 SIGCHLD, and killing a stopped process doesn't generate
1099 one, nor an exit status.
1101 res
= my_waitpid (lwpid
, &wstat
, 0);
1102 if (res
== -1 && errno
== ECHILD
)
1103 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
1104 } while (res
> 0 && WIFSTOPPED (wstat
));
1106 gdb_assert (res
> 0);
1109 /* Callback for `find_inferior'. Kills an lwp of a given process,
1110 except the leader. */
1113 kill_one_lwp_callback (struct inferior_list_entry
*entry
, void *args
)
1115 struct thread_info
*thread
= (struct thread_info
*) entry
;
1116 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1117 int pid
= * (int *) args
;
1119 if (ptid_get_pid (entry
->id
) != pid
)
1122 /* We avoid killing the first thread here, because of a Linux kernel (at
1123 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1124 the children get a chance to be reaped, it will remain a zombie
1127 if (lwpid_of (thread
) == pid
)
1130 debug_printf ("lkop: is last of process %s\n",
1131 target_pid_to_str (entry
->id
));
1135 kill_wait_lwp (lwp
);
1140 linux_kill (int pid
)
1142 struct process_info
*process
;
1143 struct lwp_info
*lwp
;
1145 process
= find_process_pid (pid
);
1146 if (process
== NULL
)
1149 /* If we're killing a running inferior, make sure it is stopped
1150 first, as PTRACE_KILL will not work otherwise. */
1151 stop_all_lwps (0, NULL
);
1153 find_inferior (&all_threads
, kill_one_lwp_callback
, &pid
);
1155 /* See the comment in linux_kill_one_lwp. We did not kill the first
1156 thread in the list, so do so now. */
1157 lwp
= find_lwp_pid (pid_to_ptid (pid
));
1162 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1166 kill_wait_lwp (lwp
);
1168 the_target
->mourn (process
);
1170 /* Since we presently can only stop all lwps of all processes, we
1171 need to unstop lwps of other processes. */
1172 unstop_all_lwps (0, NULL
);
1176 /* Get pending signal of THREAD, for detaching purposes. This is the
1177 signal the thread last stopped for, which we need to deliver to the
1178 thread when detaching, otherwise, it'd be suppressed/lost. */
1181 get_detach_signal (struct thread_info
*thread
)
1183 enum gdb_signal signo
= GDB_SIGNAL_0
;
1185 struct lwp_info
*lp
= get_thread_lwp (thread
);
1187 if (lp
->status_pending_p
)
1188 status
= lp
->status_pending
;
1191 /* If the thread had been suspended by gdbserver, and it stopped
1192 cleanly, then it'll have stopped with SIGSTOP. But we don't
1193 want to deliver that SIGSTOP. */
1194 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1195 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1198 /* Otherwise, we may need to deliver the signal we
1200 status
= lp
->last_status
;
1203 if (!WIFSTOPPED (status
))
1206 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1207 target_pid_to_str (ptid_of (thread
)));
1211 /* Extended wait statuses aren't real SIGTRAPs. */
1212 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1215 debug_printf ("GPS: lwp %s had stopped with extended "
1216 "status: no pending signal\n",
1217 target_pid_to_str (ptid_of (thread
)));
1221 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1223 if (program_signals_p
&& !program_signals
[signo
])
1226 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1227 target_pid_to_str (ptid_of (thread
)),
1228 gdb_signal_to_string (signo
));
1231 else if (!program_signals_p
1232 /* If we have no way to know which signals GDB does not
1233 want to have passed to the program, assume
1234 SIGTRAP/SIGINT, which is GDB's default. */
1235 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1238 debug_printf ("GPS: lwp %s had signal %s, "
1239 "but we don't know if we should pass it. "
1240 "Default to not.\n",
1241 target_pid_to_str (ptid_of (thread
)),
1242 gdb_signal_to_string (signo
));
1248 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1249 target_pid_to_str (ptid_of (thread
)),
1250 gdb_signal_to_string (signo
));
1252 return WSTOPSIG (status
);
1257 linux_detach_one_lwp (struct inferior_list_entry
*entry
, void *args
)
1259 struct thread_info
*thread
= (struct thread_info
*) entry
;
1260 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1261 int pid
= * (int *) args
;
1264 if (ptid_get_pid (entry
->id
) != pid
)
1267 /* If there is a pending SIGSTOP, get rid of it. */
1268 if (lwp
->stop_expected
)
1271 debug_printf ("Sending SIGCONT to %s\n",
1272 target_pid_to_str (ptid_of (thread
)));
1274 kill_lwp (lwpid_of (thread
), SIGCONT
);
1275 lwp
->stop_expected
= 0;
1278 /* Flush any pending changes to the process's registers. */
1279 regcache_invalidate_thread (thread
);
1281 /* Pass on any pending signal for this thread. */
1282 sig
= get_detach_signal (thread
);
1284 /* Finally, let it resume. */
1285 if (the_low_target
.prepare_to_resume
!= NULL
)
1286 the_low_target
.prepare_to_resume (lwp
);
1287 if (ptrace (PTRACE_DETACH
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1288 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1289 error (_("Can't detach %s: %s"),
1290 target_pid_to_str (ptid_of (thread
)),
1298 linux_detach (int pid
)
1300 struct process_info
*process
;
1302 process
= find_process_pid (pid
);
1303 if (process
== NULL
)
1306 /* Stop all threads before detaching. First, ptrace requires that
1307 the thread is stopped to sucessfully detach. Second, thread_db
1308 may need to uninstall thread event breakpoints from memory, which
1309 only works with a stopped process anyway. */
1310 stop_all_lwps (0, NULL
);
1312 #ifdef USE_THREAD_DB
1313 thread_db_detach (process
);
1316 /* Stabilize threads (move out of jump pads). */
1317 stabilize_threads ();
1319 find_inferior (&all_threads
, linux_detach_one_lwp
, &pid
);
1321 the_target
->mourn (process
);
1323 /* Since we presently can only stop all lwps of all processes, we
1324 need to unstop lwps of other processes. */
1325 unstop_all_lwps (0, NULL
);
1329 /* Remove all LWPs that belong to process PROC from the lwp list. */
/* find_inferior callback: delete the LWP behind ENTRY if it belongs
   to the process PROC points at.  Always returns 0 so the iteration
   visits every thread.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
1345 linux_mourn (struct process_info
*process
)
1347 struct process_info_private
*priv
;
1349 #ifdef USE_THREAD_DB
1350 thread_db_mourn (process
);
1353 find_inferior (&all_threads
, delete_lwp_callback
, process
);
1355 /* Freeing all private data. */
1356 priv
= process
->priv
;
1357 free (priv
->arch_private
);
1359 process
->priv
= NULL
;
1361 remove_process (process
);
/* Wait until process PID has fully exited: loop on waitpid until we
   see an exit/signal status, or until waitpid reports that there are
   no children left (ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when waitpid actually wrote it; on failure
       (ret == -1) STATUS is uninitialized and testing it is undefined
       behavior.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1376 /* Return nonzero if the given thread is still alive. */
1378 linux_thread_alive (ptid_t ptid
)
1380 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1382 /* We assume we always know if a thread exits. If a whole process
1383 exited but we still haven't been able to report it to GDB, we'll
1384 hold on to the last lwp of the dead process. */
1391 /* Return 1 if this lwp still has an interesting status pending. If
1392 not (e.g., it had stopped for a breakpoint that is gone), return
1396 thread_still_has_status_pending_p (struct thread_info
*thread
)
1398 struct lwp_info
*lp
= get_thread_lwp (thread
);
1400 if (!lp
->status_pending_p
)
1403 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1404 report any status pending the LWP may have. */
1405 if (thread
->last_resume_kind
== resume_stop
1406 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
1409 if (thread
->last_resume_kind
!= resume_stop
1410 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1411 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1413 struct thread_info
*saved_thread
;
1417 gdb_assert (lp
->last_status
!= 0);
1421 saved_thread
= current_thread
;
1422 current_thread
= thread
;
1424 if (pc
!= lp
->stop_pc
)
1427 debug_printf ("PC of %ld changed\n",
1432 #if !USE_SIGTRAP_SIGINFO
1433 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1434 && !(*the_low_target
.breakpoint_at
) (pc
))
1437 debug_printf ("previous SW breakpoint of %ld gone\n",
1441 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1442 && !hardware_breakpoint_inserted_here (pc
))
1445 debug_printf ("previous HW breakpoint of %ld gone\n",
1451 current_thread
= saved_thread
;
1456 debug_printf ("discarding pending breakpoint status\n");
1457 lp
->status_pending_p
= 0;
1465 /* Return 1 if this lwp has an interesting status pending. */
1467 status_pending_p_callback (struct inferior_list_entry
*entry
, void *arg
)
1469 struct thread_info
*thread
= (struct thread_info
*) entry
;
1470 struct lwp_info
*lp
= get_thread_lwp (thread
);
1471 ptid_t ptid
= * (ptid_t
*) arg
;
1473 /* Check if we're only interested in events from a specific process
1474 or a specific LWP. */
1475 if (!ptid_match (ptid_of (thread
), ptid
))
1478 if (lp
->status_pending_p
1479 && !thread_still_has_status_pending_p (thread
))
1481 linux_resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1485 return lp
->status_pending_p
;
1489 same_lwp (struct inferior_list_entry
*entry
, void *data
)
1491 ptid_t ptid
= *(ptid_t
*) data
;
1494 if (ptid_get_lwp (ptid
) != 0)
1495 lwp
= ptid_get_lwp (ptid
);
1497 lwp
= ptid_get_pid (ptid
);
1499 if (ptid_get_lwp (entry
->id
) == lwp
)
1506 find_lwp_pid (ptid_t ptid
)
1508 struct inferior_list_entry
*thread
1509 = find_inferior (&all_threads
, same_lwp
, &ptid
);
1514 return get_thread_lwp ((struct thread_info
*) thread
);
1517 /* Return the number of known LWPs in the tgid given by PID. */
1522 struct inferior_list_entry
*inf
, *tmp
;
1525 ALL_INFERIORS (&all_threads
, inf
, tmp
)
1527 if (ptid_get_pid (inf
->id
) == pid
)
1534 /* The arguments passed to iterate_over_lwps. */
1536 struct iterate_over_lwps_args
1538 /* The FILTER argument passed to iterate_over_lwps. */
1541 /* The CALLBACK argument passed to iterate_over_lwps. */
1542 iterate_over_lwps_ftype
*callback
;
1544 /* The DATA argument passed to iterate_over_lwps. */
1548 /* Callback for find_inferior used by iterate_over_lwps to filter
1549 calls to the callback supplied to that function. Returning a
1550 nonzero value causes find_inferiors to stop iterating and return
1551 the current inferior_list_entry. Returning zero indicates that
1552 find_inferiors should continue iterating. */
1555 iterate_over_lwps_filter (struct inferior_list_entry
*entry
, void *args_p
)
1557 struct iterate_over_lwps_args
*args
1558 = (struct iterate_over_lwps_args
*) args_p
;
1560 if (ptid_match (entry
->id
, args
->filter
))
1562 struct thread_info
*thr
= (struct thread_info
*) entry
;
1563 struct lwp_info
*lwp
= get_thread_lwp (thr
);
1565 return (*args
->callback
) (lwp
, args
->data
);
1571 /* See nat/linux-nat.h. */
1574 iterate_over_lwps (ptid_t filter
,
1575 iterate_over_lwps_ftype callback
,
1578 struct iterate_over_lwps_args args
= {filter
, callback
, data
};
1579 struct inferior_list_entry
*entry
;
1581 entry
= find_inferior (&all_threads
, iterate_over_lwps_filter
, &args
);
1585 return get_thread_lwp ((struct thread_info
*) entry
);
1588 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1589 their exits until all other threads in the group have exited. */
1592 check_zombie_leaders (void)
1594 struct process_info
*proc
, *tmp
;
1596 ALL_PROCESSES (proc
, tmp
)
1598 pid_t leader_pid
= pid_of (proc
);
1599 struct lwp_info
*leader_lp
;
1601 leader_lp
= find_lwp_pid (pid_to_ptid (leader_pid
));
1604 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1605 "num_lwps=%d, zombie=%d\n",
1606 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1607 linux_proc_pid_is_zombie (leader_pid
));
1609 if (leader_lp
!= NULL
1610 /* Check if there are other threads in the group, as we may
1611 have raced with the inferior simply exiting. */
1612 && !last_thread_of_process_p (leader_pid
)
1613 && linux_proc_pid_is_zombie (leader_pid
))
1615 /* A leader zombie can mean one of two things:
1617 - It exited, and there's an exit status pending
1618 available, or only the leader exited (not the whole
1619 program). In the latter case, we can't waitpid the
1620 leader's exit status until all other threads are gone.
1622 - There are 3 or more threads in the group, and a thread
1623 other than the leader exec'd. On an exec, the Linux
1624 kernel destroys all other threads (except the execing
1625 one) in the thread group, and resets the execing thread's
1626 tid to the tgid. No exit notification is sent for the
1627 execing thread -- from the ptracer's perspective, it
1628 appears as though the execing thread just vanishes.
1629 Until we reap all other threads except the leader and the
1630 execing thread, the leader will be zombie, and the
1631 execing thread will be in `D (disc sleep)'. As soon as
1632 all other threads are reaped, the execing thread changes
1633 it's tid to the tgid, and the previous (zombie) leader
1634 vanishes, giving place to the "new" leader. We could try
1635 distinguishing the exit and exec cases, by waiting once
1636 more, and seeing if something comes out, but it doesn't
1637 sound useful. The previous leader _does_ go away, and
1638 we'll re-add the new one once we see the exec event
1639 (which is just the same as what would happen if the
1640 previous leader did exit voluntarily before some other
1645 "CZL: Thread group leader %d zombie "
1646 "(it exited, or another thread execd).\n",
1649 delete_lwp (leader_lp
);
1654 /* Callback for `find_inferior'. Returns the first LWP that is not
1655 stopped. ARG is a PTID filter. */
1658 not_stopped_callback (struct inferior_list_entry
*entry
, void *arg
)
1660 struct thread_info
*thr
= (struct thread_info
*) entry
;
1661 struct lwp_info
*lwp
;
1662 ptid_t filter
= *(ptid_t
*) arg
;
1664 if (!ptid_match (ptid_of (thr
), filter
))
1667 lwp
= get_thread_lwp (thr
);
1674 /* This function should only be called if the LWP got a SIGTRAP.
1676 Handle any tracepoint steps or hits. Return true if a tracepoint
1677 event was handled, 0 otherwise. */
1680 handle_tracepoints (struct lwp_info
*lwp
)
1682 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1683 int tpoint_related_event
= 0;
1685 gdb_assert (lwp
->suspended
== 0);
1687 /* If this tracepoint hit causes a tracing stop, we'll immediately
1688 uninsert tracepoints. To do this, we temporarily pause all
1689 threads, unpatch away, and then unpause threads. We need to make
1690 sure the unpausing doesn't resume LWP too. */
1693 /* And we need to be sure that any all-threads-stopping doesn't try
1694 to move threads out of the jump pads, as it could deadlock the
1695 inferior (LWP could be in the jump pad, maybe even holding the
1698 /* Do any necessary step collect actions. */
1699 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1701 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1703 /* See if we just hit a tracepoint and do its main collect
1705 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1709 gdb_assert (lwp
->suspended
== 0);
1710 gdb_assert (!stabilizing_threads
|| lwp
->collecting_fast_tracepoint
);
1712 if (tpoint_related_event
)
1715 debug_printf ("got a tracepoint event\n");
1722 /* Convenience wrapper. Returns true if LWP is presently collecting a
1726 linux_fast_tracepoint_collecting (struct lwp_info
*lwp
,
1727 struct fast_tpoint_collect_status
*status
)
1729 CORE_ADDR thread_area
;
1730 struct thread_info
*thread
= get_lwp_thread (lwp
);
1732 if (the_low_target
.get_thread_area
== NULL
)
1735 /* Get the thread area address. This is used to recognize which
1736 thread is which when tracing with the in-process agent library.
1737 We don't read anything from the address, and treat it as opaque;
1738 it's the address itself that we assume is unique per-thread. */
1739 if ((*the_low_target
.get_thread_area
) (lwpid_of (thread
), &thread_area
) == -1)
1742 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
1745 /* The reason we resume in the caller, is because we want to be able
1746 to pass lwp->status_pending as WSTAT, and we need to clear
1747 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1748 refuses to resume. */
1751 maybe_move_out_of_jump_pad (struct lwp_info
*lwp
, int *wstat
)
1753 struct thread_info
*saved_thread
;
1755 saved_thread
= current_thread
;
1756 current_thread
= get_lwp_thread (lwp
);
1759 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
1760 && supports_fast_tracepoints ()
1761 && agent_loaded_p ())
1763 struct fast_tpoint_collect_status status
;
1767 debug_printf ("Checking whether LWP %ld needs to move out of the "
1769 lwpid_of (current_thread
));
1771 r
= linux_fast_tracepoint_collecting (lwp
, &status
);
1774 || (WSTOPSIG (*wstat
) != SIGILL
1775 && WSTOPSIG (*wstat
) != SIGFPE
1776 && WSTOPSIG (*wstat
) != SIGSEGV
1777 && WSTOPSIG (*wstat
) != SIGBUS
))
1779 lwp
->collecting_fast_tracepoint
= r
;
1783 if (r
== 1 && lwp
->exit_jump_pad_bkpt
== NULL
)
1785 /* Haven't executed the original instruction yet.
1786 Set breakpoint there, and wait till it's hit,
1787 then single-step until exiting the jump pad. */
1788 lwp
->exit_jump_pad_bkpt
1789 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
1793 debug_printf ("Checking whether LWP %ld needs to move out of "
1794 "the jump pad...it does\n",
1795 lwpid_of (current_thread
));
1796 current_thread
= saved_thread
;
1803 /* If we get a synchronous signal while collecting, *and*
1804 while executing the (relocated) original instruction,
1805 reset the PC to point at the tpoint address, before
1806 reporting to GDB. Otherwise, it's an IPA lib bug: just
1807 report the signal to GDB, and pray for the best. */
1809 lwp
->collecting_fast_tracepoint
= 0;
1812 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
1813 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
1816 struct regcache
*regcache
;
1818 /* The si_addr on a few signals references the address
1819 of the faulting instruction. Adjust that as
1821 if ((WSTOPSIG (*wstat
) == SIGILL
1822 || WSTOPSIG (*wstat
) == SIGFPE
1823 || WSTOPSIG (*wstat
) == SIGBUS
1824 || WSTOPSIG (*wstat
) == SIGSEGV
)
1825 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
1826 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
1827 /* Final check just to make sure we don't clobber
1828 the siginfo of non-kernel-sent signals. */
1829 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
1831 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
1832 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
1833 (PTRACE_TYPE_ARG3
) 0, &info
);
1836 regcache
= get_thread_regcache (current_thread
, 1);
1837 (*the_low_target
.set_pc
) (regcache
, status
.tpoint_addr
);
1838 lwp
->stop_pc
= status
.tpoint_addr
;
1840 /* Cancel any fast tracepoint lock this thread was
1842 force_unlock_trace_buffer ();
1845 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
1848 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1849 "stopping all threads momentarily.\n");
1851 stop_all_lwps (1, lwp
);
1853 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
1854 lwp
->exit_jump_pad_bkpt
= NULL
;
1856 unstop_all_lwps (1, lwp
);
1858 gdb_assert (lwp
->suspended
>= 0);
1864 debug_printf ("Checking whether LWP %ld needs to move out of the "
1866 lwpid_of (current_thread
));
1868 current_thread
= saved_thread
;
1872 /* Enqueue one signal in the "signals to report later when out of the
1876 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1878 struct pending_signals
*p_sig
;
1879 struct thread_info
*thread
= get_lwp_thread (lwp
);
1882 debug_printf ("Deferring signal %d for LWP %ld.\n",
1883 WSTOPSIG (*wstat
), lwpid_of (thread
));
1887 struct pending_signals
*sig
;
1889 for (sig
= lwp
->pending_signals_to_report
;
1892 debug_printf (" Already queued %d\n",
1895 debug_printf (" (no more currently queued signals)\n");
1898 /* Don't enqueue non-RT signals if they are already in the deferred
1899 queue. (SIGSTOP being the easiest signal to see ending up here
1901 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
1903 struct pending_signals
*sig
;
1905 for (sig
= lwp
->pending_signals_to_report
;
1909 if (sig
->signal
== WSTOPSIG (*wstat
))
1912 debug_printf ("Not requeuing already queued non-RT signal %d"
1921 p_sig
= xmalloc (sizeof (*p_sig
));
1922 p_sig
->prev
= lwp
->pending_signals_to_report
;
1923 p_sig
->signal
= WSTOPSIG (*wstat
);
1924 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
1925 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1928 lwp
->pending_signals_to_report
= p_sig
;
1931 /* Dequeue one signal from the "signals to report later when out of
1932 the jump pad" list. */
1935 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1937 struct thread_info
*thread
= get_lwp_thread (lwp
);
1939 if (lwp
->pending_signals_to_report
!= NULL
)
1941 struct pending_signals
**p_sig
;
1943 p_sig
= &lwp
->pending_signals_to_report
;
1944 while ((*p_sig
)->prev
!= NULL
)
1945 p_sig
= &(*p_sig
)->prev
;
1947 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
1948 if ((*p_sig
)->info
.si_signo
!= 0)
1949 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1955 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1956 WSTOPSIG (*wstat
), lwpid_of (thread
));
1960 struct pending_signals
*sig
;
1962 for (sig
= lwp
->pending_signals_to_report
;
1965 debug_printf (" Still queued %d\n",
1968 debug_printf (" (no more queued signals)\n");
1977 /* Fetch the possibly triggered data watchpoint info and store it in
1980 On some archs, like x86, that use debug registers to set
1981 watchpoints, it's possible that the way to know which watched
1982 address trapped, is to check the register that is used to select
1983 which address to watch. Problem is, between setting the watchpoint
1984 and reading back which data address trapped, the user may change
1985 the set of watchpoints, and, as a consequence, GDB changes the
1986 debug registers in the inferior. To avoid reading back a stale
1987 stopped-data-address when that happens, we cache in LP the fact
1988 that a watchpoint trapped, and the corresponding data address, as
1989 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1990 registers meanwhile, we have the cached data we can rely on. */
1993 check_stopped_by_watchpoint (struct lwp_info
*child
)
1995 if (the_low_target
.stopped_by_watchpoint
!= NULL
)
1997 struct thread_info
*saved_thread
;
1999 saved_thread
= current_thread
;
2000 current_thread
= get_lwp_thread (child
);
2002 if (the_low_target
.stopped_by_watchpoint ())
2004 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
2006 if (the_low_target
.stopped_data_address
!= NULL
)
2007 child
->stopped_data_address
2008 = the_low_target
.stopped_data_address ();
2010 child
->stopped_data_address
= 0;
2013 current_thread
= saved_thread
;
2016 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
2019 /* Return the ptrace options that we want to try to enable. */
2022 linux_low_ptrace_options (int attached
)
2027 options
|= PTRACE_O_EXITKILL
;
2029 if (report_fork_events
)
2030 options
|= PTRACE_O_TRACEFORK
;
2032 if (report_vfork_events
)
2033 options
|= (PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEVFORKDONE
);
2038 /* Do low-level handling of the event, and check if we should go on
2039 and pass it to caller code. Return the affected lwp if we are, or
2042 static struct lwp_info
*
2043 linux_low_filter_event (int lwpid
, int wstat
)
2045 struct lwp_info
*child
;
2046 struct thread_info
*thread
;
2047 int have_stop_pc
= 0;
2049 child
= find_lwp_pid (pid_to_ptid (lwpid
));
2051 /* If we didn't find a process, one of two things presumably happened:
2052 - A process we started and then detached from has exited. Ignore it.
2053 - A process we are controlling has forked and the new child's stop
2054 was reported to us by the kernel. Save its PID. */
2055 if (child
== NULL
&& WIFSTOPPED (wstat
))
2057 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
2060 else if (child
== NULL
)
2063 thread
= get_lwp_thread (child
);
2067 child
->last_status
= wstat
;
2069 /* Check if the thread has exited. */
2070 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
2073 debug_printf ("LLFE: %d exited.\n", lwpid
);
2074 if (num_lwps (pid_of (thread
)) > 1)
2077 /* If there is at least one more LWP, then the exit signal was
2078 not the end of the debugged application and should be
2085 /* This was the last lwp in the process. Since events are
2086 serialized to GDB core, and we can't report this one
2087 right now, but GDB core and the other target layers will
2088 want to be notified about the exit code/signal, leave the
2089 status pending for the next time we're able to report
2091 mark_lwp_dead (child
, wstat
);
2096 gdb_assert (WIFSTOPPED (wstat
));
2098 if (WIFSTOPPED (wstat
))
2100 struct process_info
*proc
;
2102 /* Architecture-specific setup after inferior is running. This
2103 needs to happen after we have attached to the inferior and it
2104 is stopped for the first time, but before we access any
2105 inferior registers. */
2106 proc
= find_process_pid (pid_of (thread
));
2107 if (proc
->priv
->new_inferior
)
2109 struct thread_info
*saved_thread
;
2111 saved_thread
= current_thread
;
2112 current_thread
= thread
;
2114 the_low_target
.arch_setup ();
2116 current_thread
= saved_thread
;
2118 proc
->priv
->new_inferior
= 0;
2122 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
2124 struct process_info
*proc
= find_process_pid (pid_of (thread
));
2125 int options
= linux_low_ptrace_options (proc
->attached
);
2127 linux_enable_event_reporting (lwpid
, options
);
2128 child
->must_set_ptrace_flags
= 0;
2131 /* Be careful to not overwrite stop_pc until
2132 check_stopped_by_breakpoint is called. */
2133 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2134 && linux_is_extended_waitstatus (wstat
))
2136 child
->stop_pc
= get_pc (child
);
2137 if (handle_extended_wait (child
, wstat
))
2139 /* The event has been handled, so just return without
2145 /* Check first whether this was a SW/HW breakpoint before checking
2146 watchpoints, because at least s390 can't tell the data address of
2147 hardware watchpoint hits, and returns stopped-by-watchpoint as
2148 long as there's a watchpoint set. */
2149 if (WIFSTOPPED (wstat
) && linux_wstatus_maybe_breakpoint (wstat
))
2151 if (check_stopped_by_breakpoint (child
))
2155 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2156 or hardware watchpoint. Check which is which if we got
2157 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2158 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2159 && (child
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
2160 || child
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
2161 check_stopped_by_watchpoint (child
);
2164 child
->stop_pc
= get_pc (child
);
2166 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2167 && child
->stop_expected
)
2170 debug_printf ("Expected stop.\n");
2171 child
->stop_expected
= 0;
2173 if (thread
->last_resume_kind
== resume_stop
)
2175 /* We want to report the stop to the core. Treat the
2176 SIGSTOP as a normal event. */
2178 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2179 target_pid_to_str (ptid_of (thread
)));
2181 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2183 /* Stopping threads. We don't want this SIGSTOP to end up
2186 debug_printf ("LLW: SIGSTOP caught for %s "
2187 "while stopping threads.\n",
2188 target_pid_to_str (ptid_of (thread
)));
2193 /* This is a delayed SIGSTOP. Filter out the event. */
2195 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2196 child
->stepping
? "step" : "continue",
2197 target_pid_to_str (ptid_of (thread
)));
2199 linux_resume_one_lwp (child
, child
->stepping
, 0, NULL
);
2204 child
->status_pending_p
= 1;
2205 child
->status_pending
= wstat
;
2209 /* Resume LWPs that are currently stopped without any pending status
2210 to report, but are resumed from the core's perspective. */
2213 resume_stopped_resumed_lwps (struct inferior_list_entry
*entry
)
2215 struct thread_info
*thread
= (struct thread_info
*) entry
;
2216 struct lwp_info
*lp
= get_thread_lwp (thread
);
2219 && !lp
->status_pending_p
2220 && thread
->last_resume_kind
!= resume_stop
2221 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
2223 int step
= thread
->last_resume_kind
== resume_step
;
2226 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2227 target_pid_to_str (ptid_of (thread
)),
2228 paddress (lp
->stop_pc
),
2231 linux_resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
2235 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2236 match FILTER_PTID (leaving others pending). The PTIDs can be:
2237 minus_one_ptid, to specify any child; a pid PTID, specifying all
2238 lwps of a thread group; or a PTID representing a single lwp. Store
2239 the stop status through the status pointer WSTAT. OPTIONS is
2240 passed to the waitpid call. Return 0 if no event was found and
2241 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2242 was found. Return the PID of the stopped child otherwise. */
2245 linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
2246 int *wstatp
, int options
)
2248 struct thread_info
*event_thread
;
2249 struct lwp_info
*event_child
, *requested_child
;
2250 sigset_t block_mask
, prev_mask
;
2253 /* N.B. event_thread points to the thread_info struct that contains
2254 event_child. Keep them in sync. */
2255 event_thread
= NULL
;
2257 requested_child
= NULL
;
2259 /* Check for a lwp with a pending status. */
2261 if (ptid_equal (filter_ptid
, minus_one_ptid
) || ptid_is_pid (filter_ptid
))
2263 event_thread
= (struct thread_info
*)
2264 find_inferior (&all_threads
, status_pending_p_callback
, &filter_ptid
);
2265 if (event_thread
!= NULL
)
2266 event_child
= get_thread_lwp (event_thread
);
2267 if (debug_threads
&& event_thread
)
2268 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
2270 else if (!ptid_equal (filter_ptid
, null_ptid
))
2272 requested_child
= find_lwp_pid (filter_ptid
);
2274 if (stopping_threads
== NOT_STOPPING_THREADS
2275 && requested_child
->status_pending_p
2276 && requested_child
->collecting_fast_tracepoint
)
2278 enqueue_one_deferred_signal (requested_child
,
2279 &requested_child
->status_pending
);
2280 requested_child
->status_pending_p
= 0;
2281 requested_child
->status_pending
= 0;
2282 linux_resume_one_lwp (requested_child
, 0, 0, NULL
);
2285 if (requested_child
->suspended
2286 && requested_child
->status_pending_p
)
2288 internal_error (__FILE__
, __LINE__
,
2289 "requesting an event out of a"
2290 " suspended child?");
2293 if (requested_child
->status_pending_p
)
2295 event_child
= requested_child
;
2296 event_thread
= get_lwp_thread (event_child
);
2300 if (event_child
!= NULL
)
2303 debug_printf ("Got an event from pending child %ld (%04x)\n",
2304 lwpid_of (event_thread
), event_child
->status_pending
);
2305 *wstatp
= event_child
->status_pending
;
2306 event_child
->status_pending_p
= 0;
2307 event_child
->status_pending
= 0;
2308 current_thread
= event_thread
;
2309 return lwpid_of (event_thread
);
2312 /* But if we don't find a pending event, we'll have to wait.
2314 We only enter this loop if no process has a pending wait status.
2315 Thus any action taken in response to a wait status inside this
2316 loop is responding as soon as we detect the status, not after any
2319 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2320 all signals while here. */
2321 sigfillset (&block_mask
);
2322 sigprocmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2324 /* Always pull all events out of the kernel. We'll randomly select
2325 an event LWP out of all that have events, to prevent
2327 while (event_child
== NULL
)
2331 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2334 - If the thread group leader exits while other threads in the
2335 thread group still exist, waitpid(TGID, ...) hangs. That
2336 waitpid won't return an exit status until the other threads
2337 in the group are reaped.
2339 - When a non-leader thread execs, that thread just vanishes
2340 without reporting an exit (so we'd hang if we waited for it
2341 explicitly in that case). The exec event is reported to
2342 the TGID pid (although we don't currently enable exec
2345 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2348 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2349 ret
, errno
? strerror (errno
) : "ERRNO-OK");
2355 debug_printf ("LLW: waitpid %ld received %s\n",
2356 (long) ret
, status_to_str (*wstatp
));
2359 /* Filter all events. IOW, leave all events pending. We'll
2360 randomly select an event LWP out of all that have events
2362 linux_low_filter_event (ret
, *wstatp
);
2363 /* Retry until nothing comes out of waitpid. A single
2364 SIGCHLD can indicate more than one child stopped. */
2368 /* Now that we've pulled all events out of the kernel, resume
2369 LWPs that don't have an interesting event to report. */
2370 if (stopping_threads
== NOT_STOPPING_THREADS
)
2371 for_each_inferior (&all_threads
, resume_stopped_resumed_lwps
);
2373 /* ... and find an LWP with a status to report to the core, if
2375 event_thread
= (struct thread_info
*)
2376 find_inferior (&all_threads
, status_pending_p_callback
, &filter_ptid
);
2377 if (event_thread
!= NULL
)
2379 event_child
= get_thread_lwp (event_thread
);
2380 *wstatp
= event_child
->status_pending
;
2381 event_child
->status_pending_p
= 0;
2382 event_child
->status_pending
= 0;
2386 /* Check for zombie thread group leaders. Those can't be reaped
2387 until all other threads in the thread group are. */
2388 check_zombie_leaders ();
2390 /* If there are no resumed children left in the set of LWPs we
2391 want to wait for, bail. We can't just block in
2392 waitpid/sigsuspend, because lwps might have been left stopped
2393 in trace-stop state, and we'd be stuck forever waiting for
2394 their status to change (which would only happen if we resumed
2395 them). Even if WNOHANG is set, this return code is preferred
2396 over 0 (below), as it is more detailed. */
2397 if ((find_inferior (&all_threads
,
2398 not_stopped_callback
,
2399 &wait_ptid
) == NULL
))
2402 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2403 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2407 /* No interesting event to report to the caller. */
2408 if ((options
& WNOHANG
))
2411 debug_printf ("WNOHANG set, no event found\n");
2413 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2417 /* Block until we get an event reported with SIGCHLD. */
2419 debug_printf ("sigsuspend'ing\n");
2421 sigsuspend (&prev_mask
);
2422 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2426 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2428 current_thread
= event_thread
;
2430 /* Check for thread exit. */
2431 if (! WIFSTOPPED (*wstatp
))
2433 gdb_assert (last_thread_of_process_p (pid_of (event_thread
)));
2436 debug_printf ("LWP %d is the last lwp of process. "
2437 "Process %ld exiting.\n",
2438 pid_of (event_thread
), lwpid_of (event_thread
));
2439 return lwpid_of (event_thread
);
2442 return lwpid_of (event_thread
);
2445 /* Wait for an event from child(ren) PTID. PTIDs can be:
2446 minus_one_ptid, to specify any child; a pid PTID, specifying all
2447 lwps of a thread group; or a PTID representing a single lwp. Store
2448 the stop status through the status pointer WSTAT. OPTIONS is
2449 passed to the waitpid call. Return 0 if no event was found and
2450 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2451 was found. Return the PID of the stopped child otherwise. */
2454 linux_wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2456 return linux_wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
2459 /* Count the LWP's that have had events. */
2462 count_events_callback (struct inferior_list_entry
*entry
, void *data
)
2464 struct thread_info
*thread
= (struct thread_info
*) entry
;
2465 struct lwp_info
*lp
= get_thread_lwp (thread
);
2468 gdb_assert (count
!= NULL
);
2470 /* Count only resumed LWPs that have an event pending. */
2471 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2472 && lp
->status_pending_p
)
2478 /* Select the LWP (if any) that is currently being single-stepped. */
2481 select_singlestep_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
2483 struct thread_info
*thread
= (struct thread_info
*) entry
;
2484 struct lwp_info
*lp
= get_thread_lwp (thread
);
2486 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2487 && thread
->last_resume_kind
== resume_step
2488 && lp
->status_pending_p
)
2494 /* Select the Nth LWP that has had an event. */
2497 select_event_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
2499 struct thread_info
*thread
= (struct thread_info
*) entry
;
2500 struct lwp_info
*lp
= get_thread_lwp (thread
);
2501 int *selector
= data
;
2503 gdb_assert (selector
!= NULL
);
2505 /* Select only resumed LWPs that have an event pending. */
2506 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2507 && lp
->status_pending_p
)
2508 if ((*selector
)-- == 0)
2514 /* Select one LWP out of those that have events pending. */
2517 select_event_lwp (struct lwp_info
**orig_lp
)
2520 int random_selector
;
2521 struct thread_info
*event_thread
= NULL
;
2523 /* In all-stop, give preference to the LWP that is being
2524 single-stepped. There will be at most one, and it's the LWP that
2525 the core is most interested in. If we didn't do this, then we'd
2526 have to handle pending step SIGTRAPs somehow in case the core
2527 later continues the previously-stepped thread, otherwise we'd
2528 report the pending SIGTRAP, and the core, not having stepped the
2529 thread, wouldn't understand what the trap was for, and therefore
2530 would report it to the user as a random signal. */
2534 = (struct thread_info
*) find_inferior (&all_threads
,
2535 select_singlestep_lwp_callback
,
2537 if (event_thread
!= NULL
)
2540 debug_printf ("SEL: Select single-step %s\n",
2541 target_pid_to_str (ptid_of (event_thread
)));
2544 if (event_thread
== NULL
)
2546 /* No single-stepping LWP. Select one at random, out of those
2547 which have had events. */
2549 /* First see how many events we have. */
2550 find_inferior (&all_threads
, count_events_callback
, &num_events
);
2551 gdb_assert (num_events
> 0);
2553 /* Now randomly pick a LWP out of those that have had
2555 random_selector
= (int)
2556 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2558 if (debug_threads
&& num_events
> 1)
2559 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2560 num_events
, random_selector
);
2563 = (struct thread_info
*) find_inferior (&all_threads
,
2564 select_event_lwp_callback
,
2568 if (event_thread
!= NULL
)
2570 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2572 /* Switch the event LWP. */
2573 *orig_lp
= event_lp
;
2577 /* Decrement the suspend count of an LWP. */
2580 unsuspend_one_lwp (struct inferior_list_entry
*entry
, void *except
)
2582 struct thread_info
*thread
= (struct thread_info
*) entry
;
2583 struct lwp_info
*lwp
= get_thread_lwp (thread
);
2585 /* Ignore EXCEPT. */
2591 gdb_assert (lwp
->suspended
>= 0);
2595 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2599 unsuspend_all_lwps (struct lwp_info
*except
)
2601 find_inferior (&all_threads
, unsuspend_one_lwp
, except
);
2604 static void move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
);
2605 static int stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
,
2607 static int lwp_running (struct inferior_list_entry
*entry
, void *data
);
2608 static ptid_t
linux_wait_1 (ptid_t ptid
,
2609 struct target_waitstatus
*ourstatus
,
2610 int target_options
);
2612 /* Stabilize threads (move out of jump pads).
2614 If a thread is midway collecting a fast tracepoint, we need to
2615 finish the collection and move it out of the jump pad before
2616 reporting the signal.
2618 This avoids recursion while collecting (when a signal arrives
2619 midway, and the signal handler itself collects), which would trash
2620 the trace buffer. In case the user set a breakpoint in a signal
2621 handler, this avoids the backtrace showing the jump pad, etc..
2622 Most importantly, there are certain things we can't do safely if
2623 threads are stopped in a jump pad (or in its callee's). For
2626 - starting a new trace run. A thread still collecting the
2627 previous run, could trash the trace buffer when resumed. The trace
2628 buffer control structures would have been reset but the thread had
2629 no way to tell. The thread could even midway memcpy'ing to the
2630 buffer, which would mean that when resumed, it would clobber the
2631 trace buffer that had been set for a new run.
2633 - we can't rewrite/reuse the jump pads for new tracepoints
2634 safely. Say you do tstart while a thread is stopped midway while
2635 collecting. When the thread is later resumed, it finishes the
2636 collection, and returns to the jump pad, to execute the original
2637 instruction that was under the tracepoint jump at the time the
2638 older run had been started. If the jump pad had been rewritten
2639 since for something else in the new run, the thread would now
2640 execute the wrong / random instructions. */
2643 linux_stabilize_threads (void)
2645 struct thread_info
*saved_thread
;
2646 struct thread_info
*thread_stuck
;
2649 = (struct thread_info
*) find_inferior (&all_threads
,
2650 stuck_in_jump_pad_callback
,
2652 if (thread_stuck
!= NULL
)
2655 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2656 lwpid_of (thread_stuck
));
2660 saved_thread
= current_thread
;
2662 stabilizing_threads
= 1;
2665 for_each_inferior (&all_threads
, move_out_of_jump_pad_callback
);
2667 /* Loop until all are stopped out of the jump pads. */
2668 while (find_inferior (&all_threads
, lwp_running
, NULL
) != NULL
)
2670 struct target_waitstatus ourstatus
;
2671 struct lwp_info
*lwp
;
2674 /* Note that we go through the full wait even loop. While
2675 moving threads out of jump pad, we need to be able to step
2676 over internal breakpoints and such. */
2677 linux_wait_1 (minus_one_ptid
, &ourstatus
, 0);
2679 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2681 lwp
= get_thread_lwp (current_thread
);
2686 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2687 || current_thread
->last_resume_kind
== resume_stop
)
2689 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2690 enqueue_one_deferred_signal (lwp
, &wstat
);
2695 find_inferior (&all_threads
, unsuspend_one_lwp
, NULL
);
2697 stabilizing_threads
= 0;
2699 current_thread
= saved_thread
;
2704 = (struct thread_info
*) find_inferior (&all_threads
,
2705 stuck_in_jump_pad_callback
,
2707 if (thread_stuck
!= NULL
)
2708 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2709 lwpid_of (thread_stuck
));
static void async_file_mark (void);
2715 /* Convenience function that is called when the kernel reports an
2716 event that is not passed out to GDB. */
2719 ignore_event (struct target_waitstatus
*ourstatus
)
2721 /* If we got an event, there may still be others, as a single
2722 SIGCHLD can indicate more than one child stopped. This forces
2723 another target_wait call. */
2726 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2730 /* Return non-zero if WAITSTATUS reflects an extended linux
2731 event. Otherwise, return zero. */
2734 extended_event_reported (const struct target_waitstatus
*waitstatus
)
2736 if (waitstatus
== NULL
)
2739 return (waitstatus
->kind
== TARGET_WAITKIND_FORKED
2740 || waitstatus
->kind
== TARGET_WAITKIND_VFORKED
2741 || waitstatus
->kind
== TARGET_WAITKIND_VFORK_DONE
);
2744 /* Wait for process, returns status. */
2747 linux_wait_1 (ptid_t ptid
,
2748 struct target_waitstatus
*ourstatus
, int target_options
)
2751 struct lwp_info
*event_child
;
2754 int step_over_finished
;
2755 int bp_explains_trap
;
2756 int maybe_internal_trap
;
2764 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid
));
2767 /* Translate generic target options into linux options. */
2769 if (target_options
& TARGET_WNOHANG
)
2772 bp_explains_trap
= 0;
2775 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2777 if (ptid_equal (step_over_bkpt
, null_ptid
))
2778 pid
= linux_wait_for_event (ptid
, &w
, options
);
2782 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2783 target_pid_to_str (step_over_bkpt
));
2784 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
2789 gdb_assert (target_options
& TARGET_WNOHANG
);
2793 debug_printf ("linux_wait_1 ret = null_ptid, "
2794 "TARGET_WAITKIND_IGNORE\n");
2798 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2805 debug_printf ("linux_wait_1 ret = null_ptid, "
2806 "TARGET_WAITKIND_NO_RESUMED\n");
2810 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
2814 event_child
= get_thread_lwp (current_thread
);
2816 /* linux_wait_for_event only returns an exit status for the last
2817 child of a process. Report it. */
2818 if (WIFEXITED (w
) || WIFSIGNALED (w
))
2822 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
2823 ourstatus
->value
.integer
= WEXITSTATUS (w
);
2827 debug_printf ("linux_wait_1 ret = %s, exited with "
2829 target_pid_to_str (ptid_of (current_thread
)),
2836 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
2837 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
2841 debug_printf ("linux_wait_1 ret = %s, terminated with "
2843 target_pid_to_str (ptid_of (current_thread
)),
2849 return ptid_of (current_thread
);
2852 /* If step-over executes a breakpoint instruction, it means a
2853 gdb/gdbserver breakpoint had been planted on top of a permanent
2854 breakpoint. The PC has been adjusted by
2855 check_stopped_by_breakpoint to point at the breakpoint address.
2856 Advance the PC manually past the breakpoint, otherwise the
2857 program would keep trapping the permanent breakpoint forever. */
2858 if (!ptid_equal (step_over_bkpt
, null_ptid
)
2859 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
2861 unsigned int increment_pc
= the_low_target
.breakpoint_len
;
2865 debug_printf ("step-over for %s executed software breakpoint\n",
2866 target_pid_to_str (ptid_of (current_thread
)));
2869 if (increment_pc
!= 0)
2871 struct regcache
*regcache
2872 = get_thread_regcache (current_thread
, 1);
2874 event_child
->stop_pc
+= increment_pc
;
2875 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
2877 if (!(*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))
2878 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
2882 /* If this event was not handled before, and is not a SIGTRAP, we
2883 report it. SIGILL and SIGSEGV are also treated as traps in case
2884 a breakpoint is inserted at the current PC. If this target does
2885 not support internal breakpoints at all, we also report the
2886 SIGTRAP without further processing; it's of no concern to us. */
2888 = (supports_breakpoints ()
2889 && (WSTOPSIG (w
) == SIGTRAP
2890 || ((WSTOPSIG (w
) == SIGILL
2891 || WSTOPSIG (w
) == SIGSEGV
)
2892 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
2894 if (maybe_internal_trap
)
2896 /* Handle anything that requires bookkeeping before deciding to
2897 report the event or continue waiting. */
2899 /* First check if we can explain the SIGTRAP with an internal
2900 breakpoint, or if we should possibly report the event to GDB.
2901 Do this before anything that may remove or insert a
2903 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
2905 /* We have a SIGTRAP, possibly a step-over dance has just
2906 finished. If so, tweak the state machine accordingly,
2907 reinsert breakpoints and delete any reinsert (software
2908 single-step) breakpoints. */
2909 step_over_finished
= finish_step_over (event_child
);
2911 /* Now invoke the callbacks of any internal breakpoints there. */
2912 check_breakpoints (event_child
->stop_pc
);
2914 /* Handle tracepoint data collecting. This may overflow the
2915 trace buffer, and cause a tracing stop, removing
2917 trace_event
= handle_tracepoints (event_child
);
2919 if (bp_explains_trap
)
2921 /* If we stepped or ran into an internal breakpoint, we've
2922 already handled it. So next time we resume (from this
2923 PC), we should step over it. */
2925 debug_printf ("Hit a gdbserver breakpoint.\n");
2927 if (breakpoint_here (event_child
->stop_pc
))
2928 event_child
->need_step_over
= 1;
2933 /* We have some other signal, possibly a step-over dance was in
2934 progress, and it should be cancelled too. */
2935 step_over_finished
= finish_step_over (event_child
);
2938 /* We have all the data we need. Either report the event to GDB, or
2939 resume threads and keep waiting for more. */
2941 /* If we're collecting a fast tracepoint, finish the collection and
2942 move out of the jump pad before delivering a signal. See
2943 linux_stabilize_threads. */
2946 && WSTOPSIG (w
) != SIGTRAP
2947 && supports_fast_tracepoints ()
2948 && agent_loaded_p ())
2951 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2952 "to defer or adjust it.\n",
2953 WSTOPSIG (w
), lwpid_of (current_thread
));
2955 /* Allow debugging the jump pad itself. */
2956 if (current_thread
->last_resume_kind
!= resume_step
2957 && maybe_move_out_of_jump_pad (event_child
, &w
))
2959 enqueue_one_deferred_signal (event_child
, &w
);
2962 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2963 WSTOPSIG (w
), lwpid_of (current_thread
));
2965 linux_resume_one_lwp (event_child
, 0, 0, NULL
);
2967 return ignore_event (ourstatus
);
2971 if (event_child
->collecting_fast_tracepoint
)
2974 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2975 "Check if we're already there.\n",
2976 lwpid_of (current_thread
),
2977 event_child
->collecting_fast_tracepoint
);
2981 event_child
->collecting_fast_tracepoint
2982 = linux_fast_tracepoint_collecting (event_child
, NULL
);
2984 if (event_child
->collecting_fast_tracepoint
!= 1)
2986 /* No longer need this breakpoint. */
2987 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
2990 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2991 "stopping all threads momentarily.\n");
2993 /* Other running threads could hit this breakpoint.
2994 We don't handle moribund locations like GDB does,
2995 instead we always pause all threads when removing
2996 breakpoints, so that any step-over or
2997 decr_pc_after_break adjustment is always taken
2998 care of while the breakpoint is still
3000 stop_all_lwps (1, event_child
);
3002 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
3003 event_child
->exit_jump_pad_bkpt
= NULL
;
3005 unstop_all_lwps (1, event_child
);
3007 gdb_assert (event_child
->suspended
>= 0);
3011 if (event_child
->collecting_fast_tracepoint
== 0)
3014 debug_printf ("fast tracepoint finished "
3015 "collecting successfully.\n");
3017 /* We may have a deferred signal to report. */
3018 if (dequeue_one_deferred_signal (event_child
, &w
))
3021 debug_printf ("dequeued one signal.\n");
3026 debug_printf ("no deferred signals.\n");
3028 if (stabilizing_threads
)
3030 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3031 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3035 debug_printf ("linux_wait_1 ret = %s, stopped "
3036 "while stabilizing threads\n",
3037 target_pid_to_str (ptid_of (current_thread
)));
3041 return ptid_of (current_thread
);
3047 /* Check whether GDB would be interested in this event. */
3049 /* If GDB is not interested in this signal, don't stop other
3050 threads, and don't report it to GDB. Just resume the inferior
3051 right away. We do this for threading-related signals as well as
3052 any that GDB specifically requested we ignore. But never ignore
3053 SIGSTOP if we sent it ourselves, and do not ignore signals when
3054 stepping - they may require special handling to skip the signal
3055 handler. Also never ignore signals that could be caused by a
3057 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3060 && current_thread
->last_resume_kind
!= resume_step
3062 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3063 (current_process ()->priv
->thread_db
!= NULL
3064 && (WSTOPSIG (w
) == __SIGRTMIN
3065 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
3068 (pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
3069 && !(WSTOPSIG (w
) == SIGSTOP
3070 && current_thread
->last_resume_kind
== resume_stop
)
3071 && !linux_wstatus_maybe_breakpoint (w
))))
3073 siginfo_t info
, *info_p
;
3076 debug_printf ("Ignored signal %d for LWP %ld.\n",
3077 WSTOPSIG (w
), lwpid_of (current_thread
));
3079 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
3080 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
3084 linux_resume_one_lwp (event_child
, event_child
->stepping
,
3085 WSTOPSIG (w
), info_p
);
3086 return ignore_event (ourstatus
);
3089 /* Note that all addresses are always "out of the step range" when
3090 there's no range to begin with. */
3091 in_step_range
= lwp_in_step_range (event_child
);
3093 /* If GDB wanted this thread to single step, and the thread is out
3094 of the step range, we always want to report the SIGTRAP, and let
3095 GDB handle it. Watchpoints should always be reported. So should
3096 signals we can't explain. A SIGTRAP we can't explain could be a
3097 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3098 do, we're be able to handle GDB breakpoints on top of internal
3099 breakpoints, by handling the internal breakpoint and still
3100 reporting the event to GDB. If we don't, we're out of luck, GDB
3101 won't see the breakpoint hit. */
3102 report_to_gdb
= (!maybe_internal_trap
3103 || (current_thread
->last_resume_kind
== resume_step
3105 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3106 || (!step_over_finished
&& !in_step_range
3107 && !bp_explains_trap
&& !trace_event
)
3108 || (gdb_breakpoint_here (event_child
->stop_pc
)
3109 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
3110 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
))
3111 || extended_event_reported (&event_child
->waitstatus
));
3113 run_breakpoint_commands (event_child
->stop_pc
);
3115 /* We found no reason GDB would want us to stop. We either hit one
3116 of our own breakpoints, or finished an internal step GDB
3117 shouldn't know about. */
3122 if (bp_explains_trap
)
3123 debug_printf ("Hit a gdbserver breakpoint.\n");
3124 if (step_over_finished
)
3125 debug_printf ("Step-over finished.\n");
3127 debug_printf ("Tracepoint event.\n");
3128 if (lwp_in_step_range (event_child
))
3129 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3130 paddress (event_child
->stop_pc
),
3131 paddress (event_child
->step_range_start
),
3132 paddress (event_child
->step_range_end
));
3133 if (extended_event_reported (&event_child
->waitstatus
))
3135 char *str
= target_waitstatus_to_string (ourstatus
);
3136 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3137 lwpid_of (get_lwp_thread (event_child
)), str
);
3142 /* We're not reporting this breakpoint to GDB, so apply the
3143 decr_pc_after_break adjustment to the inferior's regcache
3146 if (the_low_target
.set_pc
!= NULL
)
3148 struct regcache
*regcache
3149 = get_thread_regcache (current_thread
, 1);
3150 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
3153 /* We may have finished stepping over a breakpoint. If so,
3154 we've stopped and suspended all LWPs momentarily except the
3155 stepping one. This is where we resume them all again. We're
3156 going to keep waiting, so use proceed, which handles stepping
3157 over the next breakpoint. */
3159 debug_printf ("proceeding all threads.\n");
3161 if (step_over_finished
)
3162 unsuspend_all_lwps (event_child
);
3164 proceed_all_lwps ();
3165 return ignore_event (ourstatus
);
3170 if (current_thread
->last_resume_kind
== resume_step
)
3172 if (event_child
->step_range_start
== event_child
->step_range_end
)
3173 debug_printf ("GDB wanted to single-step, reporting event.\n");
3174 else if (!lwp_in_step_range (event_child
))
3175 debug_printf ("Out of step range, reporting event.\n");
3177 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3178 debug_printf ("Stopped by watchpoint.\n");
3179 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3180 debug_printf ("Stopped by GDB breakpoint.\n");
3182 debug_printf ("Hit a non-gdbserver trap event.\n");
3185 /* Alright, we're going to report a stop. */
3187 if (!stabilizing_threads
)
3189 /* In all-stop, stop all threads. */
3191 stop_all_lwps (0, NULL
);
3193 /* If we're not waiting for a specific LWP, choose an event LWP
3194 from among those that have had events. Giving equal priority
3195 to all LWPs that have had events helps prevent
3197 if (ptid_equal (ptid
, minus_one_ptid
))
3199 event_child
->status_pending_p
= 1;
3200 event_child
->status_pending
= w
;
3202 select_event_lwp (&event_child
);
3204 /* current_thread and event_child must stay in sync. */
3205 current_thread
= get_lwp_thread (event_child
);
3207 event_child
->status_pending_p
= 0;
3208 w
= event_child
->status_pending
;
3211 if (step_over_finished
)
3215 /* If we were doing a step-over, all other threads but
3216 the stepping one had been paused in start_step_over,
3217 with their suspend counts incremented. We don't want
3218 to do a full unstop/unpause, because we're in
3219 all-stop mode (so we want threads stopped), but we
3220 still need to unsuspend the other threads, to
3221 decrement their `suspended' count back. */
3222 unsuspend_all_lwps (event_child
);
3226 /* If we just finished a step-over, then all threads had
3227 been momentarily paused. In all-stop, that's fine,
3228 we want threads stopped by now anyway. In non-stop,
3229 we need to re-resume threads that GDB wanted to be
3231 unstop_all_lwps (1, event_child
);
3235 /* Stabilize threads (move out of jump pads). */
3237 stabilize_threads ();
3241 /* If we just finished a step-over, then all threads had been
3242 momentarily paused. In all-stop, that's fine, we want
3243 threads stopped by now anyway. In non-stop, we need to
3244 re-resume threads that GDB wanted to be running. */
3245 if (step_over_finished
)
3246 unstop_all_lwps (1, event_child
);
3249 if (extended_event_reported (&event_child
->waitstatus
))
3251 /* If the reported event is a fork, vfork or exec, let GDB know. */
3252 ourstatus
->kind
= event_child
->waitstatus
.kind
;
3253 ourstatus
->value
= event_child
->waitstatus
.value
;
3255 /* Clear the event lwp's waitstatus since we handled it already. */
3256 event_child
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3259 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3261 /* Now that we've selected our final event LWP, un-adjust its PC if
3262 it was a software breakpoint, and the client doesn't know we can
3263 adjust the breakpoint ourselves. */
3264 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3265 && !swbreak_feature
)
3267 int decr_pc
= the_low_target
.decr_pc_after_break
;
3271 struct regcache
*regcache
3272 = get_thread_regcache (current_thread
, 1);
3273 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
+ decr_pc
);
3277 if (current_thread
->last_resume_kind
== resume_stop
3278 && WSTOPSIG (w
) == SIGSTOP
)
3280 /* A thread that has been requested to stop by GDB with vCont;t,
3281 and it stopped cleanly, so report as SIG0. The use of
3282 SIGSTOP is an implementation detail. */
3283 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3285 else if (current_thread
->last_resume_kind
== resume_stop
3286 && WSTOPSIG (w
) != SIGSTOP
)
3288 /* A thread that has been requested to stop by GDB with vCont;t,
3289 but, it stopped for other reasons. */
3290 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3292 else if (ourstatus
->kind
== TARGET_WAITKIND_STOPPED
)
3294 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3297 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
3301 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3302 target_pid_to_str (ptid_of (current_thread
)),
3303 ourstatus
->kind
, ourstatus
->value
.sig
);
3307 return ptid_of (current_thread
);
3310 /* Get rid of any pending event in the pipe. */
3312 async_file_flush (void)
3318 ret
= read (linux_event_pipe
[0], &buf
, 1);
3319 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3322 /* Put something in the pipe, so the event loop wakes up. */
3324 async_file_mark (void)
3328 async_file_flush ();
3331 ret
= write (linux_event_pipe
[1], "+", 1);
3332 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3334 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3335 be awakened anyway. */
3339 linux_wait (ptid_t ptid
,
3340 struct target_waitstatus
*ourstatus
, int target_options
)
3344 /* Flush the async file first. */
3345 if (target_is_async_p ())
3346 async_file_flush ();
3350 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
3352 while ((target_options
& TARGET_WNOHANG
) == 0
3353 && ptid_equal (event_ptid
, null_ptid
)
3354 && ourstatus
->kind
== TARGET_WAITKIND_IGNORE
);
3356 /* If at least one stop was reported, there may be more. A single
3357 SIGCHLD can signal more than one child stop. */
3358 if (target_is_async_p ()
3359 && (target_options
& TARGET_WNOHANG
) != 0
3360 && !ptid_equal (event_ptid
, null_ptid
))
/* Send a signal to an LWP.  Returns the result of tkill/kill: 0 on
   success, -1 with errno set on failure.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    /* Remember a definitive ENOSYS so we only probe the syscall
       once per process.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
/* Public entry point to request that LWP stop; queues a SIGSTOP via
   send_sigstop.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3401 send_sigstop (struct lwp_info
*lwp
)
3405 pid
= lwpid_of (get_lwp_thread (lwp
));
3407 /* If we already have a pending stop signal for this process, don't
3409 if (lwp
->stop_expected
)
3412 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3418 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3420 lwp
->stop_expected
= 1;
3421 kill_lwp (pid
, SIGSTOP
);
3425 send_sigstop_callback (struct inferior_list_entry
*entry
, void *except
)
3427 struct thread_info
*thread
= (struct thread_info
*) entry
;
3428 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3430 /* Ignore EXCEPT. */
3441 /* Increment the suspend count of an LWP, and stop it, if not stopped
3444 suspend_and_send_sigstop_callback (struct inferior_list_entry
*entry
,
3447 struct thread_info
*thread
= (struct thread_info
*) entry
;
3448 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3450 /* Ignore EXCEPT. */
3456 return send_sigstop_callback (entry
, except
);
3460 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3462 /* It's dead, really. */
3465 /* Store the exit status for later. */
3466 lwp
->status_pending_p
= 1;
3467 lwp
->status_pending
= wstat
;
3469 /* Prevent trying to stop it. */
3472 /* No further stops are expected from a dead lwp. */
3473 lwp
->stop_expected
= 0;
3476 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3479 wait_for_sigstop (void)
3481 struct thread_info
*saved_thread
;
3486 saved_thread
= current_thread
;
3487 if (saved_thread
!= NULL
)
3488 saved_tid
= saved_thread
->entry
.id
;
3490 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3493 debug_printf ("wait_for_sigstop: pulling events\n");
3495 /* Passing NULL_PTID as filter indicates we want all events to be
3496 left pending. Eventually this returns when there are no
3497 unwaited-for children left. */
3498 ret
= linux_wait_for_event_filtered (minus_one_ptid
, null_ptid
,
3500 gdb_assert (ret
== -1);
3502 if (saved_thread
== NULL
|| linux_thread_alive (saved_tid
))
3503 current_thread
= saved_thread
;
3507 debug_printf ("Previously current thread died.\n");
3511 /* We can't change the current inferior behind GDB's back,
3512 otherwise, a subsequent command may apply to the wrong
3514 current_thread
= NULL
;
3518 /* Set a valid thread as current. */
3519 set_desired_thread (0);
3524 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3525 move it out, because we need to report the stop event to GDB. For
3526 example, if the user puts a breakpoint in the jump pad, it's
3527 because she wants to debug it. */
3530 stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
, void *data
)
3532 struct thread_info
*thread
= (struct thread_info
*) entry
;
3533 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3535 gdb_assert (lwp
->suspended
== 0);
3536 gdb_assert (lwp
->stopped
);
3538 /* Allow debugging the jump pad, gdb_collect, etc.. */
3539 return (supports_fast_tracepoints ()
3540 && agent_loaded_p ()
3541 && (gdb_breakpoint_here (lwp
->stop_pc
)
3542 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3543 || thread
->last_resume_kind
== resume_step
)
3544 && linux_fast_tracepoint_collecting (lwp
, NULL
));
3548 move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
)
3550 struct thread_info
*thread
= (struct thread_info
*) entry
;
3551 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3554 gdb_assert (lwp
->suspended
== 0);
3555 gdb_assert (lwp
->stopped
);
3557 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3559 /* Allow debugging the jump pad, gdb_collect, etc. */
3560 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3561 && lwp
->stop_reason
!= TARGET_STOPPED_BY_WATCHPOINT
3562 && thread
->last_resume_kind
!= resume_step
3563 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3566 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3571 lwp
->status_pending_p
= 0;
3572 enqueue_one_deferred_signal (lwp
, wstat
);
3575 debug_printf ("Signal %d for LWP %ld deferred "
3577 WSTOPSIG (*wstat
), lwpid_of (thread
));
3580 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
3587 lwp_running (struct inferior_list_entry
*entry
, void *data
)
3589 struct thread_info
*thread
= (struct thread_info
*) entry
;
3590 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3599 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3600 If SUSPEND, then also increase the suspend count of every LWP,
3604 stop_all_lwps (int suspend
, struct lwp_info
*except
)
3606 /* Should not be called recursively. */
3607 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
3612 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3613 suspend
? "stop-and-suspend" : "stop",
3615 ? target_pid_to_str (ptid_of (get_lwp_thread (except
)))
3619 stopping_threads
= (suspend
3620 ? STOPPING_AND_SUSPENDING_THREADS
3621 : STOPPING_THREADS
);
3624 find_inferior (&all_threads
, suspend_and_send_sigstop_callback
, except
);
3626 find_inferior (&all_threads
, send_sigstop_callback
, except
);
3627 wait_for_sigstop ();
3628 stopping_threads
= NOT_STOPPING_THREADS
;
3632 debug_printf ("stop_all_lwps done, setting stopping_threads "
3633 "back to !stopping\n");
3638 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3639 SIGNAL is nonzero, give it that signal. */
3642 linux_resume_one_lwp_throw (struct lwp_info
*lwp
,
3643 int step
, int signal
, siginfo_t
*info
)
3645 struct thread_info
*thread
= get_lwp_thread (lwp
);
3646 struct thread_info
*saved_thread
;
3647 int fast_tp_collecting
;
3649 if (lwp
->stopped
== 0)
3652 fast_tp_collecting
= lwp
->collecting_fast_tracepoint
;
3654 gdb_assert (!stabilizing_threads
|| fast_tp_collecting
);
3656 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3657 user used the "jump" command, or "set $pc = foo"). */
3658 if (lwp
->stop_pc
!= get_pc (lwp
))
3660 /* Collecting 'while-stepping' actions doesn't make sense
3662 release_while_stepping_state_list (thread
);
3665 /* If we have pending signals or status, and a new signal, enqueue the
3666 signal. Also enqueue the signal if we are waiting to reinsert a
3667 breakpoint; it will be picked up again below. */
3669 && (lwp
->status_pending_p
3670 || lwp
->pending_signals
!= NULL
3671 || lwp
->bp_reinsert
!= 0
3672 || fast_tp_collecting
))
3674 struct pending_signals
*p_sig
;
3675 p_sig
= xmalloc (sizeof (*p_sig
));
3676 p_sig
->prev
= lwp
->pending_signals
;
3677 p_sig
->signal
= signal
;
3679 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3681 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
3682 lwp
->pending_signals
= p_sig
;
3685 if (lwp
->status_pending_p
)
3688 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3689 " has pending status\n",
3690 lwpid_of (thread
), step
? "step" : "continue", signal
,
3691 lwp
->stop_expected
? "expected" : "not expected");
3695 saved_thread
= current_thread
;
3696 current_thread
= thread
;
3699 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3700 lwpid_of (thread
), step
? "step" : "continue", signal
,
3701 lwp
->stop_expected
? "expected" : "not expected");
3703 /* This bit needs some thinking about. If we get a signal that
3704 we must report while a single-step reinsert is still pending,
3705 we often end up resuming the thread. It might be better to
3706 (ew) allow a stack of pending events; then we could be sure that
3707 the reinsert happened right away and not lose any signals.
3709 Making this stack would also shrink the window in which breakpoints are
3710 uninserted (see comment in linux_wait_for_lwp) but not enough for
3711 complete correctness, so it won't solve that problem. It may be
3712 worthwhile just to solve this one, however. */
3713 if (lwp
->bp_reinsert
!= 0)
3716 debug_printf (" pending reinsert at 0x%s\n",
3717 paddress (lwp
->bp_reinsert
));
3719 if (can_hardware_single_step ())
3721 if (fast_tp_collecting
== 0)
3724 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
3726 fprintf (stderr
, "BAD - reinserting and suspended(%d).\n",
3733 /* Postpone any pending signal. It was enqueued above. */
3737 if (fast_tp_collecting
== 1)
3740 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3741 " (exit-jump-pad-bkpt)\n",
3744 /* Postpone any pending signal. It was enqueued above. */
3747 else if (fast_tp_collecting
== 2)
3750 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3751 " single-stepping\n",
3754 if (can_hardware_single_step ())
3758 internal_error (__FILE__
, __LINE__
,
3759 "moving out of jump pad single-stepping"
3760 " not implemented on this target");
3763 /* Postpone any pending signal. It was enqueued above. */
3767 /* If we have while-stepping actions in this thread set it stepping.
3768 If we have a signal to deliver, it may or may not be set to
3769 SIG_IGN, we don't know. Assume so, and allow collecting
3770 while-stepping into a signal handler. A possible smart thing to
3771 do would be to set an internal breakpoint at the signal return
3772 address, continue, and carry on catching this while-stepping
3773 action only when that breakpoint is hit. A future
3775 if (thread
->while_stepping
!= NULL
3776 && can_hardware_single_step ())
3779 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3784 if (the_low_target
.get_pc
!= NULL
)
3786 struct regcache
*regcache
= get_thread_regcache (current_thread
, 1);
3788 lwp
->stop_pc
= (*the_low_target
.get_pc
) (regcache
);
3792 debug_printf (" %s from pc 0x%lx\n", step
? "step" : "continue",
3793 (long) lwp
->stop_pc
);
3797 /* If we have pending signals, consume one unless we are trying to
3798 reinsert a breakpoint or we're trying to finish a fast tracepoint
3800 if (lwp
->pending_signals
!= NULL
3801 && lwp
->bp_reinsert
== 0
3802 && fast_tp_collecting
== 0)
3804 struct pending_signals
**p_sig
;
3806 p_sig
= &lwp
->pending_signals
;
3807 while ((*p_sig
)->prev
!= NULL
)
3808 p_sig
= &(*p_sig
)->prev
;
3810 signal
= (*p_sig
)->signal
;
3811 if ((*p_sig
)->info
.si_signo
!= 0)
3812 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
3819 if (the_low_target
.prepare_to_resume
!= NULL
)
3820 the_low_target
.prepare_to_resume (lwp
);
3822 regcache_invalidate_thread (thread
);
3824 lwp
->stepping
= step
;
3825 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (thread
),
3826 (PTRACE_TYPE_ARG3
) 0,
3827 /* Coerce to a uintptr_t first to avoid potential gcc warning
3828 of coercing an 8 byte integer to a 4 byte pointer. */
3829 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
3831 current_thread
= saved_thread
;
3833 perror_with_name ("resuming thread");
3835 /* Successfully resumed. Clear state that no longer makes sense,
3836 and mark the LWP as running. Must not do this before resuming
3837 otherwise if that fails other code will be confused. E.g., we'd
3838 later try to stop the LWP and hang forever waiting for a stop
3839 status. Note that we must not throw after this is cleared,
3840 otherwise handle_zombie_lwp_error would get confused. */
3842 lwp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3845 /* Called when we try to resume a stopped LWP and that errors out. If
3846 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3847 or about to become), discard the error, clear any pending status
3848 the LWP may have, and return true (we'll collect the exit status
3849 soon enough). Otherwise, return false. */
3852 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
3854 struct thread_info
*thread
= get_lwp_thread (lp
);
3856 /* If we get an error after resuming the LWP successfully, we'd
3857 confuse !T state for the LWP being gone. */
3858 gdb_assert (lp
->stopped
);
3860 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3861 because even if ptrace failed with ESRCH, the tracee may be "not
3862 yet fully dead", but already refusing ptrace requests. In that
3863 case the tracee has 'R (Running)' state for a little bit
3864 (observed in Linux 3.18). See also the note on ESRCH in the
3865 ptrace(2) man page. Instead, check whether the LWP has any state
3866 other than ptrace-stopped. */
3868 /* Don't assume anything if /proc/PID/status can't be read. */
3869 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
3871 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3872 lp
->status_pending_p
= 0;
3878 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3879 disappears while we try to resume it. */
3882 linux_resume_one_lwp (struct lwp_info
*lwp
,
3883 int step
, int signal
, siginfo_t
*info
)
3887 linux_resume_one_lwp_throw (lwp
, step
, signal
, info
);
3889 CATCH (ex
, RETURN_MASK_ERROR
)
3891 if (!check_ptrace_stopped_lwp_gone (lwp
))
3892 throw_exception (ex
);
/* A bundle of a resume-request array and its length, passed through
   find_inferior to linux_set_resume_request.  */

struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
3903 /* This function is called once per thread via find_inferior.
3904 ARG is a pointer to a thread_resume_array struct.
3905 We look up the thread specified by ENTRY in ARG, and mark the thread
3906 with a pointer to the appropriate resume request.
3908 This algorithm is O(threads * resume elements), but resume elements
3909 is small (and will remain small at least until GDB supports thread
3913 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
3915 struct thread_info
*thread
= (struct thread_info
*) entry
;
3916 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3918 struct thread_resume_array
*r
;
3922 for (ndx
= 0; ndx
< r
->n
; ndx
++)
3924 ptid_t ptid
= r
->resume
[ndx
].thread
;
3925 if (ptid_equal (ptid
, minus_one_ptid
)
3926 || ptid_equal (ptid
, entry
->id
)
3927 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3929 || (ptid_get_pid (ptid
) == pid_of (thread
)
3930 && (ptid_is_pid (ptid
)
3931 || ptid_get_lwp (ptid
) == -1)))
3933 if (r
->resume
[ndx
].kind
== resume_stop
3934 && thread
->last_resume_kind
== resume_stop
)
3937 debug_printf ("already %s LWP %ld at GDB's request\n",
3938 (thread
->last_status
.kind
3939 == TARGET_WAITKIND_STOPPED
)
3947 lwp
->resume
= &r
->resume
[ndx
];
3948 thread
->last_resume_kind
= lwp
->resume
->kind
;
3950 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
3951 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
3953 /* If we had a deferred signal to report, dequeue one now.
3954 This can happen if LWP gets more than one signal while
3955 trying to get out of a jump pad. */
3957 && !lwp
->status_pending_p
3958 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
3960 lwp
->status_pending_p
= 1;
3963 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3964 "leaving status pending.\n",
3965 WSTOPSIG (lwp
->status_pending
),
3973 /* No resume action for this thread. */
3979 /* find_inferior callback for linux_resume.
3980 Set *FLAG_P if this lwp has an interesting status pending. */
3983 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
3985 struct thread_info
*thread
= (struct thread_info
*) entry
;
3986 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3988 /* LWPs which will not be resumed are not interesting, because
3989 we might not wait for them next time through linux_wait. */
3990 if (lwp
->resume
== NULL
)
3993 if (thread_still_has_status_pending_p (thread
))
3994 * (int *) flag_p
= 1;
3999 /* Return 1 if this lwp that GDB wants running is stopped at an
4000 internal breakpoint that we need to step over. It assumes that any
4001 required STOP_PC adjustment has already been propagated to the
4002 inferior's regcache. */
4005 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
4007 struct thread_info
*thread
= (struct thread_info
*) entry
;
4008 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4009 struct thread_info
*saved_thread
;
4012 /* LWPs which will not be resumed are not interesting, because we
4013 might not wait for them next time through linux_wait. */
4018 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4023 if (thread
->last_resume_kind
== resume_stop
)
4026 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4032 gdb_assert (lwp
->suspended
>= 0);
4037 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4042 if (!lwp
->need_step_over
)
4045 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread
));
4048 if (lwp
->status_pending_p
)
4051 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4057 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4061 /* If the PC has changed since we stopped, then don't do anything,
4062 and let the breakpoint/tracepoint be hit. This happens if, for
4063 instance, GDB handled the decr_pc_after_break subtraction itself,
4064 GDB is OOL stepping this thread, or the user has issued a "jump"
4065 command, or poked thread's registers herself. */
4066 if (pc
!= lwp
->stop_pc
)
4069 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4070 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4072 paddress (lwp
->stop_pc
), paddress (pc
));
4074 lwp
->need_step_over
= 0;
4078 saved_thread
= current_thread
;
4079 current_thread
= thread
;
4081 /* We can only step over breakpoints we know about. */
4082 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
4084 /* Don't step over a breakpoint that GDB expects to hit
4085 though. If the condition is being evaluated on the target's side
4086 and it evaluate to false, step over this breakpoint as well. */
4087 if (gdb_breakpoint_here (pc
)
4088 && gdb_condition_true_at_breakpoint (pc
)
4089 && gdb_no_commands_at_breakpoint (pc
))
4092 debug_printf ("Need step over [LWP %ld]? yes, but found"
4093 " GDB breakpoint at 0x%s; skipping step over\n",
4094 lwpid_of (thread
), paddress (pc
));
4096 current_thread
= saved_thread
;
4102 debug_printf ("Need step over [LWP %ld]? yes, "
4103 "found breakpoint at 0x%s\n",
4104 lwpid_of (thread
), paddress (pc
));
4106 /* We've found an lwp that needs stepping over --- return 1 so
4107 that find_inferior stops looking. */
4108 current_thread
= saved_thread
;
4110 /* If the step over is cancelled, this is set again. */
4111 lwp
->need_step_over
= 0;
4116 current_thread
= saved_thread
;
4119 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4121 lwpid_of (thread
), paddress (pc
));
4126 /* Start a step-over operation on LWP. When LWP stopped at a
4127 breakpoint, to make progress, we need to remove the breakpoint out
4128 of the way. If we let other threads run while we do that, they may
4129 pass by the breakpoint location and miss hitting it. To avoid
4130 that, a step-over momentarily stops all threads while LWP is
4131 single-stepped while the breakpoint is temporarily uninserted from
4132 the inferior. When the single-step finishes, we reinsert the
4133 breakpoint, and let all threads that are supposed to be running,
4136 On targets that don't support hardware single-step, we don't
4137 currently support full software single-stepping. Instead, we only
4138 support stepping over the thread event breakpoint, by asking the
4139 low target where to place a reinsert breakpoint. Since this
4140 routine assumes the breakpoint being stepped over is a thread event
4141 breakpoint, it usually assumes the return address of the current
4142 function is a good enough place to set the reinsert breakpoint. */
4145 start_step_over (struct lwp_info
*lwp
)
4147 struct thread_info
*thread
= get_lwp_thread (lwp
);
4148 struct thread_info
*saved_thread
;
4153 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4156 stop_all_lwps (1, lwp
);
4157 gdb_assert (lwp
->suspended
== 0);
4160 debug_printf ("Done stopping all threads for step-over.\n");
4162 /* Note, we should always reach here with an already adjusted PC,
4163 either by GDB (if we're resuming due to GDB's request), or by our
4164 caller, if we just finished handling an internal breakpoint GDB
4165 shouldn't care about. */
4168 saved_thread
= current_thread
;
4169 current_thread
= thread
;
4171 lwp
->bp_reinsert
= pc
;
4172 uninsert_breakpoints_at (pc
);
4173 uninsert_fast_tracepoint_jumps_at (pc
);
4175 if (can_hardware_single_step ())
4181 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
4182 set_reinsert_breakpoint (raddr
);
4186 current_thread
= saved_thread
;
4188 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
4190 /* Require next event from this LWP. */
4191 step_over_bkpt
= thread
->entry
.id
;
4195 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4196 start_step_over, if still there, and delete any reinsert
4197 breakpoints we've set, on non hardware single-step targets. */
4200 finish_step_over (struct lwp_info
*lwp
)
4202 if (lwp
->bp_reinsert
!= 0)
4205 debug_printf ("Finished step over.\n");
4207 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4208 may be no breakpoint to reinsert there by now. */
4209 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4210 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4212 lwp
->bp_reinsert
= 0;
4214 /* Delete any software-single-step reinsert breakpoints. No
4215 longer needed. We don't have to worry about other threads
4216 hitting this trap, and later not being able to explain it,
4217 because we were stepping over a breakpoint, and we hold all
4218 threads but LWP stopped while doing that. */
4219 if (!can_hardware_single_step ())
4220 delete_reinsert_breakpoints ();
4222 step_over_bkpt
= null_ptid
;
4229 /* This function is called once per thread. We check the thread's resume
4230 request, which will tell us whether to resume, step, or leave the thread
4231 stopped; and what signal, if any, it should be sent.
4233 For threads which we aren't explicitly told otherwise, we preserve
4234 the stepping flag; this is used for stepping over gdbserver-placed
4237 If pending_flags was set in any thread, we queue any needed
4238 signals, since we won't actually resume. We already have a pending
4239 event to report, so we don't need to preserve any step requests;
4240 they should be re-issued if necessary. */
4243 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
4245 struct thread_info
*thread
= (struct thread_info
*) entry
;
4246 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4248 int leave_all_stopped
= * (int *) arg
;
4251 if (lwp
->resume
== NULL
)
4254 if (lwp
->resume
->kind
== resume_stop
)
4257 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4262 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4264 /* Stop the thread, and wait for the event asynchronously,
4265 through the event loop. */
4271 debug_printf ("already stopped LWP %ld\n",
4274 /* The LWP may have been stopped in an internal event that
4275 was not meant to be notified back to GDB (e.g., gdbserver
4276 breakpoint), so we should be reporting a stop event in
4279 /* If the thread already has a pending SIGSTOP, this is a
4280 no-op. Otherwise, something later will presumably resume
4281 the thread and this will cause it to cancel any pending
4282 operation, due to last_resume_kind == resume_stop. If
4283 the thread already has a pending status to report, we
4284 will still report it the next time we wait - see
4285 status_pending_p_callback. */
4287 /* If we already have a pending signal to report, then
4288 there's no need to queue a SIGSTOP, as this means we're
4289 midway through moving the LWP out of the jumppad, and we
4290 will report the pending signal as soon as that is
4292 if (lwp
->pending_signals_to_report
== NULL
)
4296 /* For stop requests, we're done. */
4298 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4302 /* If this thread which is about to be resumed has a pending status,
4303 then don't resume any threads - we can just report the pending
4304 status. Make sure to queue any signals that would otherwise be
4305 sent. In all-stop mode, we do this decision based on if *any*
4306 thread has a pending status. If there's a thread that needs the
4307 step-over-breakpoint dance, then don't resume any other thread
4308 but that particular one. */
4309 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
4314 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
4316 step
= (lwp
->resume
->kind
== resume_step
);
4317 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
4322 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
4324 /* If we have a new signal, enqueue the signal. */
4325 if (lwp
->resume
->sig
!= 0)
4327 struct pending_signals
*p_sig
;
4328 p_sig
= xmalloc (sizeof (*p_sig
));
4329 p_sig
->prev
= lwp
->pending_signals
;
4330 p_sig
->signal
= lwp
->resume
->sig
;
4331 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
4333 /* If this is the same signal we were previously stopped by,
4334 make sure to queue its siginfo. We can ignore the return
4335 value of ptrace; if it fails, we'll skip
4336 PTRACE_SETSIGINFO. */
4337 if (WIFSTOPPED (lwp
->last_status
)
4338 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
4339 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
4342 lwp
->pending_signals
= p_sig
;
4346 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4352 linux_resume (struct thread_resume
*resume_info
, size_t n
)
4354 struct thread_resume_array array
= { resume_info
, n
};
4355 struct thread_info
*need_step_over
= NULL
;
4357 int leave_all_stopped
;
4362 debug_printf ("linux_resume:\n");
4365 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
4367 /* If there is a thread which would otherwise be resumed, which has
4368 a pending status, then don't resume any threads - we can just
4369 report the pending status. Make sure to queue any signals that
4370 would otherwise be sent. In non-stop mode, we'll apply this
4371 logic to each thread individually. We consume all pending events
4372 before considering to start a step-over (in all-stop). */
4375 find_inferior (&all_threads
, resume_status_pending_p
, &any_pending
);
4377 /* If there is a thread which would otherwise be resumed, which is
4378 stopped at a breakpoint that needs stepping over, then don't
4379 resume any threads - have it step over the breakpoint with all
4380 other threads stopped, then resume all threads again. Make sure
4381 to queue any signals that would otherwise be delivered or
4383 if (!any_pending
&& supports_breakpoints ())
4385 = (struct thread_info
*) find_inferior (&all_threads
,
4386 need_step_over_p
, NULL
);
4388 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4392 if (need_step_over
!= NULL
)
4393 debug_printf ("Not resuming all, need step over\n");
4394 else if (any_pending
)
4395 debug_printf ("Not resuming, all-stop and found "
4396 "an LWP with pending status\n");
4398 debug_printf ("Resuming, no pending status or step over needed\n");
4401 /* Even if we're leaving threads stopped, queue all signals we'd
4402 otherwise deliver. */
4403 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
4406 start_step_over (get_thread_lwp (need_step_over
));
4410 debug_printf ("linux_resume done\n");
4415 /* This function is called once per thread. We check the thread's
4416 last resume request, which will tell us whether to resume, step, or
4417 leave the thread stopped. Any signal the client requested to be
4418 delivered has already been enqueued at this point.
4420 If any thread that GDB wants running is stopped at an internal
4421 breakpoint that needs stepping over, we start a step-over operation
4422 on that particular thread, and leave all others stopped. */
4425 proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4427 struct thread_info
*thread
= (struct thread_info
*) entry
;
4428 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4435 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4440 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4444 if (thread
->last_resume_kind
== resume_stop
4445 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
4448 debug_printf (" client wants LWP to remain %ld stopped\n",
4453 if (lwp
->status_pending_p
)
4456 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4461 gdb_assert (lwp
->suspended
>= 0);
4466 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
4470 if (thread
->last_resume_kind
== resume_stop
4471 && lwp
->pending_signals_to_report
== NULL
4472 && lwp
->collecting_fast_tracepoint
== 0)
4474 /* We haven't reported this LWP as stopped yet (otherwise, the
4475 last_status.kind check above would catch it, and we wouldn't
4476 reach here. This LWP may have been momentarily paused by a
4477 stop_all_lwps call while handling for example, another LWP's
4478 step-over. In that case, the pending expected SIGSTOP signal
4479 that was queued at vCont;t handling time will have already
4480 been consumed by wait_for_sigstop, and so we need to requeue
4481 another one here. Note that if the LWP already has a SIGSTOP
4482 pending, this is a no-op. */
4485 debug_printf ("Client wants LWP %ld to stop. "
4486 "Making sure it has a SIGSTOP pending\n",
4492 step
= thread
->last_resume_kind
== resume_step
;
4493 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
4498 unsuspend_and_proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4500 struct thread_info
*thread
= (struct thread_info
*) entry
;
4501 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4507 gdb_assert (lwp
->suspended
>= 0);
4509 return proceed_one_lwp (entry
, except
);
4512 /* When we finish a step-over, set threads running again. If there's
4513 another thread that may need a step-over, now's the time to start
4514 it. Eventually, we'll move all threads past their breakpoints. */
4517 proceed_all_lwps (void)
4519 struct thread_info
*need_step_over
;
4521 /* If there is a thread which would otherwise be resumed, which is
4522 stopped at a breakpoint that needs stepping over, then don't
4523 resume any threads - have it step over the breakpoint with all
4524 other threads stopped, then resume all threads again. */
4526 if (supports_breakpoints ())
4529 = (struct thread_info
*) find_inferior (&all_threads
,
4530 need_step_over_p
, NULL
);
4532 if (need_step_over
!= NULL
)
4535 debug_printf ("proceed_all_lwps: found "
4536 "thread %ld needing a step-over\n",
4537 lwpid_of (need_step_over
));
4539 start_step_over (get_thread_lwp (need_step_over
));
4545 debug_printf ("Proceeding, no step-over needed\n");
4547 find_inferior (&all_threads
, proceed_one_lwp
, NULL
);
4550 /* Stopped LWPs that the client wanted to be running, that don't have
4551 pending statuses, are set to run again, except for EXCEPT, if not
4552 NULL. This undoes a stop_all_lwps call. */
4555 unstop_all_lwps (int unsuspend
, struct lwp_info
*except
)
4561 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4562 lwpid_of (get_lwp_thread (except
)));
4564 debug_printf ("unstopping all lwps\n");
4568 find_inferior (&all_threads
, unsuspend_and_proceed_one_lwp
, except
);
4570 find_inferior (&all_threads
, proceed_one_lwp
, except
);
4574 debug_printf ("unstop_all_lwps done\n");
4580 #ifdef HAVE_LINUX_REGSETS
4582 #define use_linux_regsets 1
4584 /* Returns true if REGSET has been disabled. */
4587 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
4589 return (info
->disabled_regsets
!= NULL
4590 && info
->disabled_regsets
[regset
- info
->regsets
]);
4593 /* Disable REGSET. */
4596 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
4600 dr_offset
= regset
- info
->regsets
;
4601 if (info
->disabled_regsets
== NULL
)
4602 info
->disabled_regsets
= xcalloc (1, info
->num_regsets
);
4603 info
->disabled_regsets
[dr_offset
] = 1;
4607 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
4608 struct regcache
*regcache
)
4610 struct regset_info
*regset
;
4611 int saw_general_regs
= 0;
4615 pid
= lwpid_of (current_thread
);
4616 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
4621 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
4624 buf
= xmalloc (regset
->size
);
4626 nt_type
= regset
->nt_type
;
4630 iov
.iov_len
= regset
->size
;
4631 data
= (void *) &iov
;
4637 res
= ptrace (regset
->get_request
, pid
,
4638 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4640 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4646 /* If we get EIO on a regset, do not try it again for
4647 this process mode. */
4648 disable_regset (regsets_info
, regset
);
4650 else if (errno
== ENODATA
)
4652 /* ENODATA may be returned if the regset is currently
4653 not "active". This can happen in normal operation,
4654 so suppress the warning in this case. */
4659 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4666 if (regset
->type
== GENERAL_REGS
)
4667 saw_general_regs
= 1;
4668 regset
->store_function (regcache
, buf
);
4672 if (saw_general_regs
)
4679 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
4680 struct regcache
*regcache
)
4682 struct regset_info
*regset
;
4683 int saw_general_regs
= 0;
4687 pid
= lwpid_of (current_thread
);
4688 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
4693 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
4694 || regset
->fill_function
== NULL
)
4697 buf
= xmalloc (regset
->size
);
4699 /* First fill the buffer with the current register set contents,
4700 in case there are any items in the kernel's regset that are
4701 not in gdbserver's regcache. */
4703 nt_type
= regset
->nt_type
;
4707 iov
.iov_len
= regset
->size
;
4708 data
= (void *) &iov
;
4714 res
= ptrace (regset
->get_request
, pid
,
4715 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4717 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4722 /* Then overlay our cached registers on that. */
4723 regset
->fill_function (regcache
, buf
);
4725 /* Only now do we write the register set. */
4727 res
= ptrace (regset
->set_request
, pid
,
4728 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4730 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
4738 /* If we get EIO on a regset, do not try it again for
4739 this process mode. */
4740 disable_regset (regsets_info
, regset
);
4742 else if (errno
== ESRCH
)
4744 /* At this point, ESRCH should mean the process is
4745 already gone, in which case we simply ignore attempts
4746 to change its registers. See also the related
4747 comment in linux_resume_one_lwp. */
4753 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4756 else if (regset
->type
== GENERAL_REGS
)
4757 saw_general_regs
= 1;
4760 if (saw_general_regs
)
4766 #else /* !HAVE_LINUX_REGSETS */
4768 #define use_linux_regsets 0
4769 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4770 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4774 /* Return 1 if register REGNO is supported by one of the regset ptrace
4775 calls or 0 if it has to be transferred individually. */
4778 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
4780 unsigned char mask
= 1 << (regno
% 8);
4781 size_t index
= regno
/ 8;
4783 return (use_linux_regsets
4784 && (regs_info
->regset_bitmap
== NULL
4785 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
4788 #ifdef HAVE_LINUX_USRREGS
4791 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
4795 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
4796 error ("Invalid register number %d.", regnum
);
4798 addr
= usrregs
->regmap
[regnum
];
4803 /* Fetch one register. */
4805 fetch_register (const struct usrregs_info
*usrregs
,
4806 struct regcache
*regcache
, int regno
)
4813 if (regno
>= usrregs
->num_regs
)
4815 if ((*the_low_target
.cannot_fetch_register
) (regno
))
4818 regaddr
= register_addr (usrregs
, regno
);
4822 size
= ((register_size (regcache
->tdesc
, regno
)
4823 + sizeof (PTRACE_XFER_TYPE
) - 1)
4824 & -sizeof (PTRACE_XFER_TYPE
));
4825 buf
= alloca (size
);
4827 pid
= lwpid_of (current_thread
);
4828 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4831 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
4832 ptrace (PTRACE_PEEKUSER
, pid
,
4833 /* Coerce to a uintptr_t first to avoid potential gcc warning
4834 of coercing an 8 byte integer to a 4 byte pointer. */
4835 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
4836 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4838 error ("reading register %d: %s", regno
, strerror (errno
));
4841 if (the_low_target
.supply_ptrace_register
)
4842 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
4844 supply_register (regcache
, regno
, buf
);
4847 /* Store one register. */
4849 store_register (const struct usrregs_info
*usrregs
,
4850 struct regcache
*regcache
, int regno
)
4857 if (regno
>= usrregs
->num_regs
)
4859 if ((*the_low_target
.cannot_store_register
) (regno
))
4862 regaddr
= register_addr (usrregs
, regno
);
4866 size
= ((register_size (regcache
->tdesc
, regno
)
4867 + sizeof (PTRACE_XFER_TYPE
) - 1)
4868 & -sizeof (PTRACE_XFER_TYPE
));
4869 buf
= alloca (size
);
4870 memset (buf
, 0, size
);
4872 if (the_low_target
.collect_ptrace_register
)
4873 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
4875 collect_register (regcache
, regno
, buf
);
4877 pid
= lwpid_of (current_thread
);
4878 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4881 ptrace (PTRACE_POKEUSER
, pid
,
4882 /* Coerce to a uintptr_t first to avoid potential gcc warning
4883 about coercing an 8 byte integer to a 4 byte pointer. */
4884 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
4885 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
4888 /* At this point, ESRCH should mean the process is
4889 already gone, in which case we simply ignore attempts
4890 to change its registers. See also the related
4891 comment in linux_resume_one_lwp. */
4895 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
4896 error ("writing register %d: %s", regno
, strerror (errno
));
4898 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4902 /* Fetch all registers, or just one, from the child process.
4903 If REGNO is -1, do this for all registers, skipping any that are
4904 assumed to have been retrieved by regsets_fetch_inferior_registers,
4905 unless ALL is non-zero.
4906 Otherwise, REGNO specifies which register (so we can save time). */
4908 usr_fetch_inferior_registers (const struct regs_info
*regs_info
,
4909 struct regcache
*regcache
, int regno
, int all
)
4911 struct usrregs_info
*usr
= regs_info
->usrregs
;
4915 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4916 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4917 fetch_register (usr
, regcache
, regno
);
4920 fetch_register (usr
, regcache
, regno
);
4923 /* Store our register values back into the inferior.
4924 If REGNO is -1, do this for all registers, skipping any that are
4925 assumed to have been saved by regsets_store_inferior_registers,
4926 unless ALL is non-zero.
4927 Otherwise, REGNO specifies which register (so we can save time). */
4929 usr_store_inferior_registers (const struct regs_info
*regs_info
,
4930 struct regcache
*regcache
, int regno
, int all
)
4932 struct usrregs_info
*usr
= regs_info
->usrregs
;
4936 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4937 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4938 store_register (usr
, regcache
, regno
);
4941 store_register (usr
, regcache
, regno
);
4944 #else /* !HAVE_LINUX_USRREGS */
4946 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4947 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4953 linux_fetch_registers (struct regcache
*regcache
, int regno
)
4957 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
4961 if (the_low_target
.fetch_register
!= NULL
4962 && regs_info
->usrregs
!= NULL
)
4963 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
4964 (*the_low_target
.fetch_register
) (regcache
, regno
);
4966 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
4967 if (regs_info
->usrregs
!= NULL
)
4968 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
4972 if (the_low_target
.fetch_register
!= NULL
4973 && (*the_low_target
.fetch_register
) (regcache
, regno
))
4976 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
4978 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
4980 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
4981 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
4986 linux_store_registers (struct regcache
*regcache
, int regno
)
4990 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
4994 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
4996 if (regs_info
->usrregs
!= NULL
)
4997 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
5001 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
5003 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
5005 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
5006 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
5011 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5012 to debugger memory starting at MYADDR. */
5015 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
5017 int pid
= lwpid_of (current_thread
);
5018 register PTRACE_XFER_TYPE
*buffer
;
5019 register CORE_ADDR addr
;
5026 /* Try using /proc. Don't bother for one word. */
5027 if (len
>= 3 * sizeof (long))
5031 /* We could keep this file open and cache it - possibly one per
5032 thread. That requires some juggling, but is even faster. */
5033 sprintf (filename
, "/proc/%d/mem", pid
);
5034 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
5038 /* If pread64 is available, use it. It's faster if the kernel
5039 supports it (only one syscall), and it's 64-bit safe even on
5040 32-bit platforms (for instance, SPARC debugging a SPARC64
5043 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
5046 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
5047 bytes
= read (fd
, myaddr
, len
);
5054 /* Some data was read, we'll try to get the rest with ptrace. */
5064 /* Round starting address down to longword boundary. */
5065 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5066 /* Round ending address up; get number of longwords that makes. */
5067 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5068 / sizeof (PTRACE_XFER_TYPE
));
5069 /* Allocate buffer of that many longwords. */
5070 buffer
= (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
5072 /* Read all the longwords */
5074 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5076 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5077 about coercing an 8 byte integer to a 4 byte pointer. */
5078 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
5079 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5080 (PTRACE_TYPE_ARG4
) 0);
5086 /* Copy appropriate bytes out of the buffer. */
5089 i
*= sizeof (PTRACE_XFER_TYPE
);
5090 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
5092 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5099 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5100 memory at MEMADDR. On failure (cannot write to the inferior)
5101 returns the value of errno. Always succeeds if LEN is zero. */
5104 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
5107 /* Round starting address down to longword boundary. */
5108 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
5109 /* Round ending address up; get number of longwords that makes. */
5111 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
5112 / sizeof (PTRACE_XFER_TYPE
);
5114 /* Allocate buffer of that many longwords. */
5115 register PTRACE_XFER_TYPE
*buffer
= (PTRACE_XFER_TYPE
*)
5116 alloca (count
* sizeof (PTRACE_XFER_TYPE
));
5118 int pid
= lwpid_of (current_thread
);
5122 /* Zero length write always succeeds. */
5128 /* Dump up to four bytes. */
5129 unsigned int val
= * (unsigned int *) myaddr
;
5135 val
= val
& 0xffffff;
5136 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5137 2 * ((len
< 4) ? len
: 4), val
, (long)memaddr
, pid
);
5140 /* Fill start and end extra bytes of buffer with existing memory data. */
5143 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5144 about coercing an 8 byte integer to a 4 byte pointer. */
5145 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
5146 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5147 (PTRACE_TYPE_ARG4
) 0);
5155 = ptrace (PTRACE_PEEKTEXT
, pid
,
5156 /* Coerce to a uintptr_t first to avoid potential gcc warning
5157 about coercing an 8 byte integer to a 4 byte pointer. */
5158 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
5159 * sizeof (PTRACE_XFER_TYPE
)),
5160 (PTRACE_TYPE_ARG4
) 0);
5165 /* Copy data to be written over corresponding part of buffer. */
5167 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
5170 /* Write the entire buffer. */
5172 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
5175 ptrace (PTRACE_POKETEXT
, pid
,
5176 /* Coerce to a uintptr_t first to avoid potential gcc warning
5177 about coercing an 8 byte integer to a 4 byte pointer. */
5178 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5179 (PTRACE_TYPE_ARG4
) buffer
[i
]);
5188 linux_look_up_symbols (void)
5190 #ifdef USE_THREAD_DB
5191 struct process_info
*proc
= current_process ();
5193 if (proc
->priv
->thread_db
!= NULL
)
5196 /* If the kernel supports tracing clones, then we don't need to
5197 use the magic thread event breakpoint to learn about
5199 thread_db_init (!linux_supports_traceclone ());
5204 linux_request_interrupt (void)
5206 extern unsigned long signal_pid
;
5208 /* Send a SIGINT to the process group. This acts just like the user
5209 typed a ^C on the controlling terminal. */
5210 kill (-signal_pid
, SIGINT
);
5213 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5214 to debugger memory starting at MYADDR. */
5217 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
5219 char filename
[PATH_MAX
];
5221 int pid
= lwpid_of (current_thread
);
5223 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5225 fd
= open (filename
, O_RDONLY
);
5229 if (offset
!= (CORE_ADDR
) 0
5230 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5233 n
= read (fd
, myaddr
, len
);
5240 /* These breakpoint and watchpoint related wrapper functions simply
5241 pass on the function call if the target has registered a
5242 corresponding function. */
5245 linux_supports_z_point_type (char z_type
)
5247 return (the_low_target
.supports_z_point_type
!= NULL
5248 && the_low_target
.supports_z_point_type (z_type
));
5252 linux_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5253 int size
, struct raw_breakpoint
*bp
)
5255 if (type
== raw_bkpt_type_sw
)
5256 return insert_memory_breakpoint (bp
);
5257 else if (the_low_target
.insert_point
!= NULL
)
5258 return the_low_target
.insert_point (type
, addr
, size
, bp
);
5260 /* Unsupported (see target.h). */
5265 linux_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5266 int size
, struct raw_breakpoint
*bp
)
5268 if (type
== raw_bkpt_type_sw
)
5269 return remove_memory_breakpoint (bp
);
5270 else if (the_low_target
.remove_point
!= NULL
)
5271 return the_low_target
.remove_point (type
, addr
, size
, bp
);
5273 /* Unsupported (see target.h). */
5277 /* Implement the to_stopped_by_sw_breakpoint target_ops
5281 linux_stopped_by_sw_breakpoint (void)
5283 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5285 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
5288 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5292 linux_supports_stopped_by_sw_breakpoint (void)
5294 return USE_SIGTRAP_SIGINFO
;
5297 /* Implement the to_stopped_by_hw_breakpoint target_ops
5301 linux_stopped_by_hw_breakpoint (void)
5303 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5305 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
5308 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5312 linux_supports_stopped_by_hw_breakpoint (void)
5314 return USE_SIGTRAP_SIGINFO
;
5317 /* Implement the supports_conditional_breakpoints target_ops
5321 linux_supports_conditional_breakpoints (void)
5323 /* GDBserver needs to step over the breakpoint if the condition is
5324 false. GDBserver software single step is too simple, so disable
5325 conditional breakpoints if the target doesn't have hardware single
5327 return can_hardware_single_step ();
5331 linux_stopped_by_watchpoint (void)
5333 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5335 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
5339 linux_stopped_data_address (void)
5341 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5343 return lwp
->stopped_data_address
;
5346 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5347 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5348 && defined(PT_TEXT_END_ADDR)
5350 /* This is only used for targets that define PT_TEXT_ADDR,
5351 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5352 the target has different ways of acquiring this information, like
5355 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5356 to tell gdb about. */
5359 linux_read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
5361 unsigned long text
, text_end
, data
;
5362 int pid
= lwpid_of (current_thread
);
5366 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
5367 (PTRACE_TYPE_ARG4
) 0);
5368 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
5369 (PTRACE_TYPE_ARG4
) 0);
5370 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
5371 (PTRACE_TYPE_ARG4
) 0);
5375 /* Both text and data offsets produced at compile-time (and so
5376 used by gdb) are relative to the beginning of the program,
5377 with the data segment immediately following the text segment.
5378 However, the actual runtime layout in memory may put the data
5379 somewhere else, so when we send gdb a data base-address, we
5380 use the real data base address and subtract the compile-time
5381 data base-address from it (which is just the length of the
5382 text segment). BSS immediately follows data in both
5385 *data_p
= data
- (text_end
- text
);
5394 linux_qxfer_osdata (const char *annex
,
5395 unsigned char *readbuf
, unsigned const char *writebuf
,
5396 CORE_ADDR offset
, int len
)
5398 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
5401 /* Convert a native/host siginfo object, into/from the siginfo in the
5402 layout of the inferiors' architecture. */
5405 siginfo_fixup (siginfo_t
*siginfo
, void *inf_siginfo
, int direction
)
5409 if (the_low_target
.siginfo_fixup
!= NULL
)
5410 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
5412 /* If there was no callback, or the callback didn't do anything,
5413 then just do a straight memcpy. */
5417 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
5419 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
5424 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
5425 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
5429 char inf_siginfo
[sizeof (siginfo_t
)];
5431 if (current_thread
== NULL
)
5434 pid
= lwpid_of (current_thread
);
5437 debug_printf ("%s siginfo for lwp %d.\n",
5438 readbuf
!= NULL
? "Reading" : "Writing",
5441 if (offset
>= sizeof (siginfo
))
5444 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
5447 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5448 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5449 inferior with a 64-bit GDBSERVER should look the same as debugging it
5450 with a 32-bit GDBSERVER, we need to convert it. */
5451 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
5453 if (offset
+ len
> sizeof (siginfo
))
5454 len
= sizeof (siginfo
) - offset
;
5456 if (readbuf
!= NULL
)
5457 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
5460 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
5462 /* Convert back to ptrace layout before flushing it out. */
5463 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
5465 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
5472 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5473 so we notice when children change state; as the handler for the
5474 sigsuspend in my_waitpid. */
5477 sigchld_handler (int signo
)
5479 int old_errno
= errno
;
5485 /* fprintf is not async-signal-safe, so call write
5487 if (write (2, "sigchld_handler\n",
5488 sizeof ("sigchld_handler\n") - 1) < 0)
5489 break; /* just ignore */
5493 if (target_is_async_p ())
5494 async_file_mark (); /* trigger a linux_wait */
5500 linux_supports_non_stop (void)
5506 linux_async (int enable
)
5508 int previous
= target_is_async_p ();
5511 debug_printf ("linux_async (%d), previous=%d\n",
5514 if (previous
!= enable
)
5517 sigemptyset (&mask
);
5518 sigaddset (&mask
, SIGCHLD
);
5520 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
5524 if (pipe (linux_event_pipe
) == -1)
5526 linux_event_pipe
[0] = -1;
5527 linux_event_pipe
[1] = -1;
5528 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
5530 warning ("creating event pipe failed.");
5534 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
5535 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
5537 /* Register the event loop handler. */
5538 add_file_handler (linux_event_pipe
[0],
5539 handle_target_event
, NULL
);
5541 /* Always trigger a linux_wait. */
5546 delete_file_handler (linux_event_pipe
[0]);
5548 close (linux_event_pipe
[0]);
5549 close (linux_event_pipe
[1]);
5550 linux_event_pipe
[0] = -1;
5551 linux_event_pipe
[1] = -1;
5554 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
5561 linux_start_non_stop (int nonstop
)
5563 /* Register or unregister from event-loop accordingly. */
5564 linux_async (nonstop
);
5566 if (target_is_async_p () != (nonstop
!= 0))
5573 linux_supports_multi_process (void)
5578 /* Check if fork events are supported. */
5581 linux_supports_fork_events (void)
5583 return linux_supports_tracefork ();
5586 /* Check if vfork events are supported. */
5589 linux_supports_vfork_events (void)
5591 return linux_supports_tracefork ();
5594 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5595 options for the specified lwp. */
5598 reset_lwp_ptrace_options_callback (struct inferior_list_entry
*entry
,
5601 struct thread_info
*thread
= (struct thread_info
*) entry
;
5602 struct lwp_info
*lwp
= get_thread_lwp (thread
);
5606 /* Stop the lwp so we can modify its ptrace options. */
5607 lwp
->must_set_ptrace_flags
= 1;
5608 linux_stop_lwp (lwp
);
5612 /* Already stopped; go ahead and set the ptrace options. */
5613 struct process_info
*proc
= find_process_pid (pid_of (thread
));
5614 int options
= linux_low_ptrace_options (proc
->attached
);
5616 linux_enable_event_reporting (lwpid_of (thread
), options
);
5617 lwp
->must_set_ptrace_flags
= 0;
5623 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5624 ptrace flags for all inferiors. This is in case the new GDB connection
5625 doesn't support the same set of events that the previous one did. */
5628 linux_handle_new_gdb_connection (void)
5632 /* Request that all the lwps reset their ptrace options. */
5633 find_inferior (&all_threads
, reset_lwp_ptrace_options_callback
, &pid
);
5637 linux_supports_disable_randomization (void)
5639 #ifdef HAVE_PERSONALITY
5647 linux_supports_agent (void)
5653 linux_supports_range_stepping (void)
5655 if (*the_low_target
.supports_range_stepping
== NULL
)
5658 return (*the_low_target
.supports_range_stepping
) ();
5661 /* Enumerate spufs IDs for process PID. */
5663 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
5669 struct dirent
*entry
;
5671 sprintf (path
, "/proc/%ld/fd", pid
);
5672 dir
= opendir (path
);
5677 while ((entry
= readdir (dir
)) != NULL
)
5683 fd
= atoi (entry
->d_name
);
5687 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
5688 if (stat (path
, &st
) != 0)
5690 if (!S_ISDIR (st
.st_mode
))
5693 if (statfs (path
, &stfs
) != 0)
5695 if (stfs
.f_type
!= SPUFS_MAGIC
)
5698 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
5700 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
5710 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5711 object type, using the /proc file system. */
5713 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
5714 unsigned const char *writebuf
,
5715 CORE_ADDR offset
, int len
)
5717 long pid
= lwpid_of (current_thread
);
5722 if (!writebuf
&& !readbuf
)
5730 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
5733 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
5734 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
5739 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5746 ret
= write (fd
, writebuf
, (size_t) len
);
5748 ret
= read (fd
, readbuf
, (size_t) len
);
5754 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5755 struct target_loadseg
5757 /* Core address to which the segment is mapped. */
5759 /* VMA recorded in the program header. */
5761 /* Size of this segment in memory. */
5765 # if defined PT_GETDSBT
5766 struct target_loadmap
5768 /* Protocol version number, must be zero. */
5770 /* Pointer to the DSBT table, its size, and the DSBT index. */
5771 unsigned *dsbt_table
;
5772 unsigned dsbt_size
, dsbt_index
;
5773 /* Number of segments in this map. */
5775 /* The actual memory map. */
5776 struct target_loadseg segs
[/*nsegs*/];
5778 # define LINUX_LOADMAP PT_GETDSBT
5779 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5780 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5782 struct target_loadmap
5784 /* Protocol version number, must be zero. */
5786 /* Number of segments in this map. */
5788 /* The actual memory map. */
5789 struct target_loadseg segs
[/*nsegs*/];
5791 # define LINUX_LOADMAP PTRACE_GETFDPIC
5792 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5793 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5797 linux_read_loadmap (const char *annex
, CORE_ADDR offset
,
5798 unsigned char *myaddr
, unsigned int len
)
5800 int pid
= lwpid_of (current_thread
);
5802 struct target_loadmap
*data
= NULL
;
5803 unsigned int actual_length
, copy_length
;
5805 if (strcmp (annex
, "exec") == 0)
5806 addr
= (int) LINUX_LOADMAP_EXEC
;
5807 else if (strcmp (annex
, "interp") == 0)
5808 addr
= (int) LINUX_LOADMAP_INTERP
;
5812 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
5818 actual_length
= sizeof (struct target_loadmap
)
5819 + sizeof (struct target_loadseg
) * data
->nsegs
;
5821 if (offset
< 0 || offset
> actual_length
)
5824 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
5825 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
5829 # define linux_read_loadmap NULL
5830 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5833 linux_process_qsupported (const char *query
)
5835 if (the_low_target
.process_qsupported
!= NULL
)
5836 the_low_target
.process_qsupported (query
);
5840 linux_supports_tracepoints (void)
5842 if (*the_low_target
.supports_tracepoints
== NULL
)
5845 return (*the_low_target
.supports_tracepoints
) ();
5849 linux_read_pc (struct regcache
*regcache
)
5851 if (the_low_target
.get_pc
== NULL
)
5854 return (*the_low_target
.get_pc
) (regcache
);
5858 linux_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
5860 gdb_assert (the_low_target
.set_pc
!= NULL
);
5862 (*the_low_target
.set_pc
) (regcache
, pc
);
5866 linux_thread_stopped (struct thread_info
*thread
)
5868 return get_thread_lwp (thread
)->stopped
;
5871 /* This exposes stop-all-threads functionality to other modules. */
5874 linux_pause_all (int freeze
)
5876 stop_all_lwps (freeze
, NULL
);
5879 /* This exposes unstop-all-threads functionality to other gdbserver
5883 linux_unpause_all (int unfreeze
)
5885 unstop_all_lwps (unfreeze
, NULL
);
5889 linux_prepare_to_access_memory (void)
5891 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5894 linux_pause_all (1);
5899 linux_done_accessing_memory (void)
5901 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5904 linux_unpause_all (1);
5908 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
5909 CORE_ADDR collector
,
5912 CORE_ADDR
*jump_entry
,
5913 CORE_ADDR
*trampoline
,
5914 ULONGEST
*trampoline_size
,
5915 unsigned char *jjump_pad_insn
,
5916 ULONGEST
*jjump_pad_insn_size
,
5917 CORE_ADDR
*adjusted_insn_addr
,
5918 CORE_ADDR
*adjusted_insn_addr_end
,
5921 return (*the_low_target
.install_fast_tracepoint_jump_pad
)
5922 (tpoint
, tpaddr
, collector
, lockaddr
, orig_size
,
5923 jump_entry
, trampoline
, trampoline_size
,
5924 jjump_pad_insn
, jjump_pad_insn_size
,
5925 adjusted_insn_addr
, adjusted_insn_addr_end
,
5929 static struct emit_ops
*
5930 linux_emit_ops (void)
5932 if (the_low_target
.emit_ops
!= NULL
)
5933 return (*the_low_target
.emit_ops
) ();
5939 linux_get_min_fast_tracepoint_insn_len (void)
5941 return (*the_low_target
.get_min_fast_tracepoint_insn_len
) ();
5944 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5947 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
5948 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
5950 char filename
[PATH_MAX
];
5952 const int auxv_size
= is_elf64
5953 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
5954 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
5956 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5958 fd
= open (filename
, O_RDONLY
);
5964 while (read (fd
, buf
, auxv_size
) == auxv_size
5965 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
5969 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
5971 switch (aux
->a_type
)
5974 *phdr_memaddr
= aux
->a_un
.a_val
;
5977 *num_phdr
= aux
->a_un
.a_val
;
5983 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
5985 switch (aux
->a_type
)
5988 *phdr_memaddr
= aux
->a_un
.a_val
;
5991 *num_phdr
= aux
->a_un
.a_val
;
5999 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
6001 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6002 "phdr_memaddr = %ld, phdr_num = %d",
6003 (long) *phdr_memaddr
, *num_phdr
);
6010 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6013 get_dynamic (const int pid
, const int is_elf64
)
6015 CORE_ADDR phdr_memaddr
, relocation
;
6017 unsigned char *phdr_buf
;
6018 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
6020 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
6023 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
6024 phdr_buf
= alloca (num_phdr
* phdr_size
);
6026 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
6029 /* Compute relocation: it is expected to be 0 for "regular" executables,
6030 non-zero for PIE ones. */
6032 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
6035 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6037 if (p
->p_type
== PT_PHDR
)
6038 relocation
= phdr_memaddr
- p
->p_vaddr
;
6042 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6044 if (p
->p_type
== PT_PHDR
)
6045 relocation
= phdr_memaddr
- p
->p_vaddr
;
6048 if (relocation
== -1)
6050 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6051 any real world executables, including PIE executables, have always
6052 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6053 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6054 or present DT_DEBUG anyway (fpc binaries are statically linked).
6056 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6058 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6063 for (i
= 0; i
< num_phdr
; i
++)
6067 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6069 if (p
->p_type
== PT_DYNAMIC
)
6070 return p
->p_vaddr
+ relocation
;
6074 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
6076 if (p
->p_type
== PT_DYNAMIC
)
6077 return p
->p_vaddr
+ relocation
;
6084 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6085 can be 0 if the inferior does not yet have the library list initialized.
6086 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6087 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6090 get_r_debug (const int pid
, const int is_elf64
)
6092 CORE_ADDR dynamic_memaddr
;
6093 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
6094 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
6097 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
6098 if (dynamic_memaddr
== 0)
6101 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
6105 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
6106 #ifdef DT_MIPS_RLD_MAP
6110 unsigned char buf
[sizeof (Elf64_Xword
)];
6114 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6116 if (linux_read_memory (dyn
->d_un
.d_val
,
6117 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6122 #endif /* DT_MIPS_RLD_MAP */
6124 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6125 map
= dyn
->d_un
.d_val
;
6127 if (dyn
->d_tag
== DT_NULL
)
6132 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
6133 #ifdef DT_MIPS_RLD_MAP
6137 unsigned char buf
[sizeof (Elf32_Word
)];
6141 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
6143 if (linux_read_memory (dyn
->d_un
.d_val
,
6144 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
6149 #endif /* DT_MIPS_RLD_MAP */
6151 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
6152 map
= dyn
->d_un
.d_val
;
6154 if (dyn
->d_tag
== DT_NULL
)
6158 dynamic_memaddr
+= dyn_size
;
6164 /* Read one pointer from MEMADDR in the inferior. */
6167 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
6171 /* Go through a union so this works on either big or little endian
6172 hosts, when the inferior's pointer size is smaller than the size
6173 of CORE_ADDR. It is assumed the inferior's endianness is the
6174 same of the superior's. */
6177 CORE_ADDR core_addr
;
6182 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
6185 if (ptr_size
== sizeof (CORE_ADDR
))
6186 *ptr
= addr
.core_addr
;
6187 else if (ptr_size
== sizeof (unsigned int))
6190 gdb_assert_not_reached ("unhandled pointer size");
6195 struct link_map_offsets
6197 /* Offset and size of r_debug.r_version. */
6198 int r_version_offset
;
6200 /* Offset and size of r_debug.r_map. */
6203 /* Offset to l_addr field in struct link_map. */
6206 /* Offset to l_name field in struct link_map. */
6209 /* Offset to l_ld field in struct link_map. */
6212 /* Offset to l_next field in struct link_map. */
6215 /* Offset to l_prev field in struct link_map. */
6219 /* Construct qXfer:libraries-svr4:read reply. */
6222 linux_qxfer_libraries_svr4 (const char *annex
, unsigned char *readbuf
,
6223 unsigned const char *writebuf
,
6224 CORE_ADDR offset
, int len
)
6227 unsigned document_len
;
6228 struct process_info_private
*const priv
= current_process ()->priv
;
6229 char filename
[PATH_MAX
];
6232 static const struct link_map_offsets lmo_32bit_offsets
=
6234 0, /* r_version offset. */
6235 4, /* r_debug.r_map offset. */
6236 0, /* l_addr offset in link_map. */
6237 4, /* l_name offset in link_map. */
6238 8, /* l_ld offset in link_map. */
6239 12, /* l_next offset in link_map. */
6240 16 /* l_prev offset in link_map. */
6243 static const struct link_map_offsets lmo_64bit_offsets
=
6245 0, /* r_version offset. */
6246 8, /* r_debug.r_map offset. */
6247 0, /* l_addr offset in link_map. */
6248 8, /* l_name offset in link_map. */
6249 16, /* l_ld offset in link_map. */
6250 24, /* l_next offset in link_map. */
6251 32 /* l_prev offset in link_map. */
6253 const struct link_map_offsets
*lmo
;
6254 unsigned int machine
;
6256 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
6257 int allocated
= 1024;
6259 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
6260 int header_done
= 0;
6262 if (writebuf
!= NULL
)
6264 if (readbuf
== NULL
)
6267 pid
= lwpid_of (current_thread
);
6268 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
6269 is_elf64
= elf_64_file_p (filename
, &machine
);
6270 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
6271 ptr_size
= is_elf64
? 8 : 4;
6273 while (annex
[0] != '\0')
6279 sep
= strchr (annex
, '=');
6284 if (len
== 5 && startswith (annex
, "start"))
6286 else if (len
== 4 && startswith (annex
, "prev"))
6290 annex
= strchr (sep
, ';');
6297 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
6304 if (priv
->r_debug
== 0)
6305 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
6307 /* We failed to find DT_DEBUG. Such situation will not change
6308 for this inferior - do not retry it. Report it to GDB as
6309 E01, see for the reasons at the GDB solib-svr4.c side. */
6310 if (priv
->r_debug
== (CORE_ADDR
) -1)
6313 if (priv
->r_debug
!= 0)
6315 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
6316 (unsigned char *) &r_version
,
6317 sizeof (r_version
)) != 0
6320 warning ("unexpected r_debug version %d", r_version
);
6322 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
6323 &lm_addr
, ptr_size
) != 0)
6325 warning ("unable to read r_map from 0x%lx",
6326 (long) priv
->r_debug
+ lmo
->r_map_offset
);
6331 document
= xmalloc (allocated
);
6332 strcpy (document
, "<library-list-svr4 version=\"1.0\"");
6333 p
= document
+ strlen (document
);
6336 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
6337 &l_name
, ptr_size
) == 0
6338 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
6339 &l_addr
, ptr_size
) == 0
6340 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
6341 &l_ld
, ptr_size
) == 0
6342 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
6343 &l_prev
, ptr_size
) == 0
6344 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
6345 &l_next
, ptr_size
) == 0)
6347 unsigned char libname
[PATH_MAX
];
6349 if (lm_prev
!= l_prev
)
6351 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6352 (long) lm_prev
, (long) l_prev
);
6356 /* Ignore the first entry even if it has valid name as the first entry
6357 corresponds to the main executable. The first entry should not be
6358 skipped if the dynamic loader was loaded late by a static executable
6359 (see solib-svr4.c parameter ignore_first). But in such case the main
6360 executable does not have PT_DYNAMIC present and this function already
6361 exited above due to failed get_r_debug. */
6364 sprintf (p
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
6369 /* Not checking for error because reading may stop before
6370 we've got PATH_MAX worth of characters. */
6372 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
6373 libname
[sizeof (libname
) - 1] = '\0';
6374 if (libname
[0] != '\0')
6376 /* 6x the size for xml_escape_text below. */
6377 size_t len
= 6 * strlen ((char *) libname
);
6382 /* Terminate `<library-list-svr4'. */
6387 while (allocated
< p
- document
+ len
+ 200)
6389 /* Expand to guarantee sufficient storage. */
6390 uintptr_t document_len
= p
- document
;
6392 document
= xrealloc (document
, 2 * allocated
);
6394 p
= document
+ document_len
;
6397 name
= xml_escape_text ((char *) libname
);
6398 p
+= sprintf (p
, "<library name=\"%s\" lm=\"0x%lx\" "
6399 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6400 name
, (unsigned long) lm_addr
,
6401 (unsigned long) l_addr
, (unsigned long) l_ld
);
6412 /* Empty list; terminate `<library-list-svr4'. */
6416 strcpy (p
, "</library-list-svr4>");
6418 document_len
= strlen (document
);
6419 if (offset
< document_len
)
6420 document_len
-= offset
;
6423 if (len
> document_len
)
6426 memcpy (readbuf
, document
+ offset
, len
);
6432 #ifdef HAVE_LINUX_BTRACE
6434 /* See to_enable_btrace target method. */
6436 static struct btrace_target_info
*
6437 linux_low_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
6439 struct btrace_target_info
*tinfo
;
6441 tinfo
= linux_enable_btrace (ptid
, conf
);
6443 if (tinfo
!= NULL
&& tinfo
->ptr_bits
== 0)
6445 struct thread_info
*thread
= find_thread_ptid (ptid
);
6446 struct regcache
*regcache
= get_thread_regcache (thread
, 0);
6448 tinfo
->ptr_bits
= register_size (regcache
->tdesc
, 0) * 8;
6454 /* See to_disable_btrace target method. */
6457 linux_low_disable_btrace (struct btrace_target_info
*tinfo
)
6459 enum btrace_error err
;
6461 err
= linux_disable_btrace (tinfo
);
6462 return (err
== BTRACE_ERR_NONE
? 0 : -1);
6465 /* Encode an Intel(R) Processor Trace configuration. */
6468 linux_low_encode_pt_config (struct buffer
*buffer
,
6469 const struct btrace_data_pt_config
*config
)
6471 buffer_grow_str (buffer
, "<pt-config>\n");
6473 switch (config
->cpu
.vendor
)
6476 buffer_xml_printf (buffer
, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6477 "model=\"%u\" stepping=\"%u\"/>\n",
6478 config
->cpu
.family
, config
->cpu
.model
,
6479 config
->cpu
.stepping
);
6486 buffer_grow_str (buffer
, "</pt-config>\n");
6489 /* Encode a raw buffer. */
6492 linux_low_encode_raw (struct buffer
*buffer
, const gdb_byte
*data
,
6498 /* We use hex encoding - see common/rsp-low.h. */
6499 buffer_grow_str (buffer
, "<raw>\n");
6505 elem
[0] = tohex ((*data
>> 4) & 0xf);
6506 elem
[1] = tohex (*data
++ & 0xf);
6508 buffer_grow (buffer
, elem
, 2);
6511 buffer_grow_str (buffer
, "</raw>\n");
6514 /* See to_read_btrace target method. */
6517 linux_low_read_btrace (struct btrace_target_info
*tinfo
, struct buffer
*buffer
,
6520 struct btrace_data btrace
;
6521 struct btrace_block
*block
;
6522 enum btrace_error err
;
6525 btrace_data_init (&btrace
);
6527 err
= linux_read_btrace (&btrace
, tinfo
, type
);
6528 if (err
!= BTRACE_ERR_NONE
)
6530 if (err
== BTRACE_ERR_OVERFLOW
)
6531 buffer_grow_str0 (buffer
, "E.Overflow.");
6533 buffer_grow_str0 (buffer
, "E.Generic Error.");
6538 switch (btrace
.format
)
6540 case BTRACE_FORMAT_NONE
:
6541 buffer_grow_str0 (buffer
, "E.No Trace.");
6544 case BTRACE_FORMAT_BTS
:
6545 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6546 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
6549 VEC_iterate (btrace_block_s
, btrace
.variant
.bts
.blocks
, i
, block
);
6551 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6552 paddress (block
->begin
), paddress (block
->end
));
6554 buffer_grow_str0 (buffer
, "</btrace>\n");
6557 case BTRACE_FORMAT_PT
:
6558 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6559 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
6560 buffer_grow_str (buffer
, "<pt>\n");
6562 linux_low_encode_pt_config (buffer
, &btrace
.variant
.pt
.config
);
6564 linux_low_encode_raw (buffer
, btrace
.variant
.pt
.data
,
6565 btrace
.variant
.pt
.size
);
6567 buffer_grow_str (buffer
, "</pt>\n");
6568 buffer_grow_str0 (buffer
, "</btrace>\n");
6572 buffer_grow_str0 (buffer
, "E.Unsupported Trace Format.");
6576 btrace_data_fini (&btrace
);
6580 btrace_data_fini (&btrace
);
6584 /* See to_btrace_conf target method. */
6587 linux_low_btrace_conf (const struct btrace_target_info
*tinfo
,
6588 struct buffer
*buffer
)
6590 const struct btrace_config
*conf
;
6592 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6593 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
6595 conf
= linux_btrace_conf (tinfo
);
6598 switch (conf
->format
)
6600 case BTRACE_FORMAT_NONE
:
6603 case BTRACE_FORMAT_BTS
:
6604 buffer_xml_printf (buffer
, "<bts");
6605 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
6606 buffer_xml_printf (buffer
, " />\n");
6609 case BTRACE_FORMAT_PT
:
6610 buffer_xml_printf (buffer
, "<pt");
6611 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->pt
.size
);
6612 buffer_xml_printf (buffer
, "/>\n");
6617 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
6620 #endif /* HAVE_LINUX_BTRACE */
6622 /* See nat/linux-nat.h. */
6625 current_lwp_ptid (void)
6627 return ptid_of (current_thread
);
/* The Linux implementations of the gdbserver target operations,
   installed by initialize_low via set_target_ops.  Slots are
   positional initializers and must match the field order of
   struct target_ops in target.h.
   NOTE(review): many initializer entries are elided in this view of
   the file — do not reorder or insert entries without checking the
   full struct definition.  */
6630 static struct target_ops linux_target_ops
= {
6631 linux_create_inferior
,
/* Register access and memory-access bracketing.  */
6640 linux_fetch_registers
,
6641 linux_store_registers
,
6642 linux_prepare_to_access_memory
,
6643 linux_done_accessing_memory
,
6646 linux_look_up_symbols
,
6647 linux_request_interrupt
,
/* Breakpoint / watchpoint support and stop-reason queries.  */
6649 linux_supports_z_point_type
,
6652 linux_stopped_by_sw_breakpoint
,
6653 linux_supports_stopped_by_sw_breakpoint
,
6654 linux_stopped_by_hw_breakpoint
,
6655 linux_supports_stopped_by_hw_breakpoint
,
6656 linux_supports_conditional_breakpoints
,
6657 linux_stopped_by_watchpoint
,
6658 linux_stopped_data_address
,
/* Text/data offset slots only exist on uClibc no-MMU targets that
   define the PT_TEXT/DATA ptrace addresses.  */
6659 #if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
6660 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR)   \
6661 && defined(PT_TEXT_END_ADDR)
6666 #ifdef USE_THREAD_DB
6667 thread_db_get_tls_address
,
6672 hostio_last_error_from_errno
,
/* Non-stop mode and multi-process / fork-event capabilities.  */
6675 linux_supports_non_stop
,
6677 linux_start_non_stop
,
6678 linux_supports_multi_process
,
6679 linux_supports_fork_events
,
6680 linux_supports_vfork_events
,
6681 linux_handle_new_gdb_connection
,
6682 #ifdef USE_THREAD_DB
6683 thread_db_handle_monitor_command
,
6687 linux_common_core_of_thread
,
/* Tracepoint support.  */
6689 linux_process_qsupported
,
6690 linux_supports_tracepoints
,
6693 linux_thread_stopped
,
6697 linux_stabilize_threads
,
6698 linux_install_fast_tracepoint_jump_pad
,
6700 linux_supports_disable_randomization
,
6701 linux_get_min_fast_tracepoint_insn_len
,
6702 linux_qxfer_libraries_svr4
,
6703 linux_supports_agent
,
/* Branch-trace operations, only when btrace support is compiled in.  */
6704 #ifdef HAVE_LINUX_BTRACE
6705 linux_supports_btrace
,
6706 linux_low_enable_btrace
,
6707 linux_low_disable_btrace
,
6708 linux_low_read_btrace
,
6709 linux_low_btrace_conf
,
6717 linux_supports_range_stepping
,
6718 linux_proc_pid_to_exec_file
,
6719 linux_mntns_open_cloexec
,
6721 linux_mntns_readlink
,
/* Ignore the signal that presumably serves as the LinuxThreads cancel
   signal (per the FIXME below), so it does not kill gdbserver.  */

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
#ifdef HAVE_LINUX_REGSETS
/* Count the entries of INFO's regset table and cache the count in
   INFO->num_regsets.  The table is terminated by an entry whose size
   is negative; the loop body is intentionally empty.  */
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
6746 initialize_low (void)
6748 struct sigaction sigchld_action
;
6749 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
6750 set_target_ops (&linux_target_ops
);
6751 set_breakpoint_data (the_low_target
.breakpoint
,
6752 the_low_target
.breakpoint_len
);
6753 linux_init_signals ();
6754 linux_ptrace_init_warnings ();
6756 sigchld_action
.sa_handler
= sigchld_handler
;
6757 sigemptyset (&sigchld_action
.sa_mask
);
6758 sigchld_action
.sa_flags
= SA_RESTART
;
6759 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
6761 initialize_low_arch ();
6763 linux_check_ptrace_features ();