/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>

#include "filestuff.h"
#include "tracepoint.h"
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */

#define SPUFS_MAGIC 0x23c9b64e

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
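
/* For illustration: W_STOPCODE builds the same wait status value that
   waitpid reports for a stopped child, so WIFSTOPPED (W_STOPCODE (sig))
   is true and WSTOPSIG (W_STOPCODE (sig)) == sig.  E.g.,
   W_STOPCODE (SIGTRAP) == (5 << 8 | 0x7f) == 0x57f.
   dequeue_one_deferred_signal below relies on this to re-synthesize a
   wait status for a deferred signal.  */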
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */

#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif
/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;
/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }

  return 0;
}
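
/* Usage sketch (see handle_extended_wait and linux_low_filter_event
   below): when waitpid reports a stop for a PID we don't know yet, we
   stash it with add_to_pid_list (&stopped_pids, lwpid, wstat); later,
   when a clone event names that PID, pull_pid_from_list retrieves the
   stashed wait status instead of calling waitpid again.  */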
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;
static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  return 0;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads != NOT_STOPPING_THREADS)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads != NOT_STOPPING_THREADS)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
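
/* Note on the encoding above: with PTRACE_O_TRACECLONE enabled, the
   kernel reports a clone as a SIGTRAP stop whose wait status carries
   the event code in bits 16 and up, hence the "event = wstat >> 16"
   extraction here and the "wstat >> 16 != 0" extended-status checks
   elsewhere in this file.  */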
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}
/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
char *
linux_attach_fail_reason_string (ptid_t ptid, int err)
{
  static char *reason_string;
  struct buffer buffer;
  char *warnings;
  long lwpid = ptid_get_lwp (ptid);

  xfree (reason_string);
  buffer_init (&buffer);
  linux_ptrace_attach_fail_reason (lwpid, &buffer);
  buffer_grow_str0 (&buffer, "");
  warnings = buffer_finish (&buffer);
  if (warnings[0] != '\0')
    reason_string = xstrprintf ("%s (%d), %s",
                                strerror (err), err, warnings);
  else
    reason_string = xstrprintf ("%s (%d)",
                                strerror (err), err);
  xfree (warnings);
  return reason_string;
}
/* Attach to an inferior process.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
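
/* Note the return convention above: 0 on success, or the errno left
   behind by PTRACE_ATTACH on failure.  Callers such as linux_attach
   below turn a nonzero result into a user-visible message via
   linux_attach_fail_reason_string.  */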
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          int new_threads_found;
          int iterations = 0;

          while (iterations < 2)
            {
              struct dirent *dp;

              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  unsigned long lwp;
                  ptid_t ptid;

                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  ptid = ptid_build (pid, lwp, 0);

                  /* Is this a new thread?  */
                  if (lwp != 0 && find_thread_ptid (ptid) == NULL)
                    {
                      int err;

                      if (debug_threads)
                        debug_printf ("Found new lwp %ld\n", lwp);

                      err = linux_attach_lwp (ptid);
                      if (err != 0)
                        warning ("Cannot attach to lwp %ld: %s",
                                 lwp,
                                 linux_attach_fail_reason_string (ptid, err));

                      new_threads_found++;
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;
            }

          closedir (dir);
        }
    }

  return 0;
}
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}
/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}
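
/* kill_lwp (declared above, defined later in this file) prefers the
   thread-targeted tkill syscall over kill where the kernel provides
   it, which is what makes the one-SIGKILL-per-thread approach above
   effective for programs using raw clone without CLONE_THREAD.  */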
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
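
/* Note the value returned above is the host signal number (via
   WSTOPSIG), not the gdb_signal enum value: linux_detach_one_lwp
   below passes it straight to ptrace (PTRACE_DETACH, ..., sig), which
   expects the kernel's numbering.  */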
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}
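
/* The loop above keeps reaping wait statuses for PID until it sees
   the process's exit or termination status, or until waitpid itself
   fails with ECHILD, i.e. until there is nothing left to reap.  */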
/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}
/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}
/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct inferior_list_entry *inf, *tmp;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving place to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}
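
/* For reference: linux_proc_pid_is_zombie (from nat/linux-procfs)
   makes this determination by reading the State: field of
   /proc/PID/status, which reports `Z (zombie)' for zombies.  */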
/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        debug_printf ("Checking whether LWP %ld needs to move out of the "
                      "jump pad.\n",
                      lwpid_of (current_inferior));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                debug_printf ("Checking whether LWP %ld needs to move out of "
                              "the jump pad...it does\n",
                              lwpid_of (current_inferior));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_inferior),
                             (PTRACE_TYPE_ARG3) 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_inferior),
                          (PTRACE_TYPE_ARG3) 0, &info);
                }

              regcache = get_thread_regcache (current_inferior, 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
                              "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
                  "jump pad...no\n",
                  lwpid_of (current_inferior));

  current_inferior = saved_inferior;
  return 0;
}
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
                  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        debug_printf ("   Already queued %d\n",
                      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                debug_printf ("Not requeuing already queued non-RT signal %d"
                              " for LWP %ld\n",
                              sig->signal,
                              lwpid_of (thread));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
          &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
                &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
                      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            debug_printf ("   Still queued %d\n",
                          sig->signal);

          debug_printf ("   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}
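
/* Together these two functions form a FIFO: enqueue_one_deferred_signal
   pushes at the head of the prev-linked list, while the dequeue above
   walks to the tail before popping, so deferred signals are re-reported
   in the order they originally arrived.  */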
/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        debug_printf ("CB: Push back breakpoint for %s\n",
                      target_pid_to_str (ptid_of (current_inferior)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        debug_printf ("CB: No breakpoint found at %s for [%s]\n",
                      paddress (lwp->stop_pc),
                      target_pid_to_str (ptid_of (current_inferior)));
    }

  current_inferior = saved_inferior;
  return 0;
}
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  */

static struct lwp_info *
linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
         needs to happen after we have attached to the inferior and it
         is stopped for the first time, but before we access any
         inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->private->new_inferior)
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = thread;

          the_low_target.arch_setup ();

          current_inferior = saved_inferior;

          proc->private->new_inferior = 0;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads
          && the_low_target.get_pc != NULL)
        {
          struct thread_info *saved_inferior;
          struct regcache *regcache;
          CORE_ADDR pc;

          saved_inferior = current_inferior;
          current_inferior = thread;
          regcache = get_thread_regcache (current_inferior, 1);
          pc = (*the_low_target.get_pc) (regcache);
          debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
          current_inferior = saved_inferior;
        }

      child->stop_pc = get_stop_pc (child);
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = thread;

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      linux_enable_event_reporting (lwpid);
      child->must_set_ptrace_flags = 0;
    }

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && wstat >> 16 != 0)
    {
      handle_extended_wait (child, wstat);
      return NULL;
    }

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
        {
          /* We want to report the stop to the core.  Treat the
             SIGSTOP as a normal event.  */
        }
      else if (stopping_threads != NOT_STOPPING_THREADS)
        {
          /* Stopping threads.  We don't want this SIGSTOP to end up
             pending in the FILTER_PTID handling below.  */
          return NULL;
        }
      else
        {
          /* Filter out the event.  */
          linux_resume_one_lwp (child, child->stepping, 0, NULL);
          return NULL;
        }
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
      && num_lwps (pid_of (thread)) > 1)
    {
      if (debug_threads)
        debug_printf ("LLW: %d exited.\n", lwpid);

      /* If there is at least one more LWP, then the exit signal
         was not the end of the debugged application and should be
         ignored.  */
      delete_lwp (child);
      return NULL;
    }

  if (!ptid_match (ptid_of (thread), filter_ptid))
    {
      if (debug_threads)
        debug_printf ("LWP %d got an event %06x, leaving pending.\n",
                      lwpid, wstat);

      if (WIFSTOPPED (wstat))
        {
          child->status_pending_p = 1;
          child->status_pending = wstat;

          if (WSTOPSIG (wstat) != SIGSTOP)
            {
              /* Cancel breakpoint hits.  The breakpoint may be
                 removed before we fetch events from this process to
                 report to the core.  It is best not to assume the
                 moribund breakpoints heuristic always handles these
                 cases --- it could be too many events go through to
                 the core before this one is handled.  All-stop always
                 cancels breakpoint hits in all threads.  */
              if (non_stop
                  && WSTOPSIG (wstat) == SIGTRAP
                  && cancel_breakpoint (child))
                {
                  /* Throw away the SIGTRAP.  */
                  child->status_pending_p = 0;

                  if (debug_threads)
                    debug_printf ("LLW: LWP %d hit a breakpoint while"
                                  " waiting for another process;"
                                  " cancelled it\n", lwpid);
                }
            }
        }
      else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
        {
          if (debug_threads)
            debug_printf ("LLWE: process %d exited while fetching "
                          "event from another LWP\n", lwpid);

          /* This was the last lwp in the process.  Since events are
             serialized to GDB core, and we can't report this one
             right now, but GDB core and the other target layers will
             want to be notified about the exit code/signal, leave the
             status pending for the next time we're able to report
             it.  */
          mark_lwp_dead (child, wstat);
        }

      return NULL;
    }

  return child;
}
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;
/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   were found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                               int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
    {
      event_thread = (struct thread_info *)
        find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
      if (event_thread != NULL)
        event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
        debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (!ptid_equal (filter_ptid, null_ptid))
    {
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        {
          event_child = requested_child;
          event_thread = get_lwp_thread (event_child);
        }
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        debug_printf ("Got an event from pending child %ld (%04x)\n",
                      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
         quirks:

         - If the thread group leader exits while other threads in the
           thread group still exist, waitpid(TGID, ...) hangs.  That
           waitpid won't return an exit status until the other threads
           in the group are reaped.

         - When a non-leader thread execs, that thread just vanishes
           without reporting an exit (so we'd hang if we waited for it
           explicitly in that case).  The exec event is reported to
           the TGID pid (although we don't currently enable exec
           events).  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
        debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
                      ret, errno ? strerror (errno) : "ERRNO-OK");

      if (ret > 0)
        {
          if (debug_threads)
            {
              debug_printf ("LLW: waitpid %ld received %s\n",
                            (long) ret, status_to_str (*wstatp));
            }

          event_child = linux_low_filter_event (filter_ptid,
                                                ret, *wstatp);
          if (event_child != NULL)
            {
              /* We got an event to report to the core.  */
              event_thread = get_lwp_thread (event_child);
              break;
            }

          /* Retry until nothing comes out of waitpid.  A single
             SIGCHLD can indicate more than one child stopped.  */
          continue;
        }

      /* Check for zombie thread group leaders.  Those can't be reaped
         until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left in the set of LWPs we
         want to wait for, bail.  We can't just block in
         waitpid/sigsuspend, because lwps might have been left stopped
         in trace-stop state, and we'd be stuck forever waiting for
         their status to change (which would only happen if we resumed
         them).  Even if WNOHANG is set, this return code is preferred
         over 0 (below), as it is more detailed.  */
      if ((find_inferior (&all_threads,
                          not_stopped_callback,
                          &wait_ptid) == NULL))
        {
          if (debug_threads)
            debug_printf ("LLW: exit (no unwaited-for LWP)\n");
          sigprocmask (SIG_SETMASK, &prev_mask, NULL);
          return -1;
        }

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
        {
          if (debug_threads)
            debug_printf ("WNOHANG set, no event found\n");

          sigprocmask (SIG_SETMASK, &prev_mask, NULL);
          return 0;
        }

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
        debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      sigprocmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  current_inferior = event_thread;

  /* Check for thread exit.  */
  if (! WIFSTOPPED (*wstatp))
    {
      gdb_assert (last_thread_of_process_p (pid_of (event_thread)));

      if (debug_threads)
        debug_printf ("LWP %d is the last lwp of process.  "
                      "Process %ld exiting.\n",
                      pid_of (event_thread), lwpid_of (event_thread));
      return lwpid_of (event_thread);
    }

  return lwpid_of (event_thread);
}
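
/* Note the race-avoidance pattern above: all signals are blocked
   before the WNOHANG waitpid check, and sigsuspend atomically
   restores the previous mask while waiting, so a SIGCHLD that arrives
   between waitpid returning 0 and the sigsuspend call cannot be
   lost.  */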
/* Wait for an event from child(ren) PTID.  PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTAT.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   were found.  Return the PID of the stopped child otherwise.  */

static int
linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
}
/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}
/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}
/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}
2249 cancel_breakpoints_callback (struct inferior_list_entry
*entry
, void *data
)
2251 struct thread_info
*thread
= (struct thread_info
*) entry
;
2252 struct lwp_info
*lp
= get_thread_lwp (thread
);
2253 struct lwp_info
*event_lp
= data
;
2255 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2259 /* If a LWP other than the LWP that we're reporting an event for has
2260 hit a GDB breakpoint (as opposed to some random trap signal),
2261 then just arrange for it to hit it again later. We don't keep
2262 the SIGTRAP status and don't forward the SIGTRAP signal to the
2263 LWP. We will handle the current event, eventually we will resume
2264 all LWPs, and this one will get its breakpoint trap again.
2266 If we do not do this, then we run the risk that the user will
2267 delete or disable the breakpoint, but the LWP will have already
2270 if (thread
->last_resume_kind
!= resume_stop
2271 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2272 && lp
->status_pending_p
2273 && WIFSTOPPED (lp
->status_pending
)
2274 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
2276 && !lp
->stopped_by_watchpoint
2277 && cancel_breakpoint (lp
))
2278 /* Throw away the SIGTRAP. */
2279 lp
->status_pending_p
= 0;
2285 linux_cancel_breakpoints (void)
2287 find_inferior (&all_threads
, cancel_breakpoints_callback
, NULL
);
2290 /* Select one LWP out of those that have events pending. */
2293 select_event_lwp (struct lwp_info
**orig_lp
)
2296 int random_selector
;
2297 struct thread_info
*event_thread
;
2299 /* Give preference to any LWP that is being single-stepped. */
2301 = (struct thread_info
*) find_inferior (&all_threads
,
2302 select_singlestep_lwp_callback
,
2304 if (event_thread
!= NULL
)
2307 debug_printf ("SEL: Select single-step %s\n",
2308 target_pid_to_str (ptid_of (event_thread
)));
2312 /* No single-stepping LWP. Select one at random, out of those
2313 which have had SIGTRAP events. */
2315 /* First see how many SIGTRAP events we have. */
2316 find_inferior (&all_threads
, count_events_callback
, &num_events
);
2318 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2319 random_selector
= (int)
2320 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2322 if (debug_threads
&& num_events
> 1)
2323 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2324 num_events
, random_selector
);
2327 = (struct thread_info
*) find_inferior (&all_threads
,
2328 select_event_lwp_callback
,
2332 if (event_thread
!= NULL
)
2334 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2336 /* Switch the event LWP. */
2337 *orig_lp
= event_lp
;
2341 /* Decrement the suspend count of an LWP. */
2344 unsuspend_one_lwp (struct inferior_list_entry
*entry
, void *except
)
2346 struct thread_info
*thread
= (struct thread_info
*) entry
;
2347 struct lwp_info
*lwp
= get_thread_lwp (thread
);
2349 /* Ignore EXCEPT. */
2355 gdb_assert (lwp
->suspended
>= 0);
2359 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2363 unsuspend_all_lwps (struct lwp_info
*except
)
2365 find_inferior (&all_threads
, unsuspend_one_lwp
, except
);
2368 static void move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
);
2369 static int stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
,
2371 static int lwp_running (struct inferior_list_entry
*entry
, void *data
);
2372 static ptid_t
linux_wait_1 (ptid_t ptid
,
2373 struct target_waitstatus
*ourstatus
,
2374 int target_options
);
2376 /* Stabilize threads (move out of jump pads).
2378 If a thread is midway collecting a fast tracepoint, we need to
2379 finish the collection and move it out of the jump pad before
2380 reporting the signal.
2382 This avoids recursion while collecting (when a signal arrives
2383 midway, and the signal handler itself collects), which would trash
2384 the trace buffer. In case the user set a breakpoint in a signal
2385 handler, this avoids the backtrace showing the jump pad, etc..
2386 Most importantly, there are certain things we can't do safely if
2387 threads are stopped in a jump pad (or in its callee's). For
2390 - starting a new trace run. A thread still collecting the
2391 previous run, could trash the trace buffer when resumed. The trace
2392 buffer control structures would have been reset but the thread had
2393 no way to tell. The thread could even midway memcpy'ing to the
2394 buffer, which would mean that when resumed, it would clobber the
2395 trace buffer that had been set for a new run.
2397 - we can't rewrite/reuse the jump pads for new tracepoints
2398 safely. Say you do tstart while a thread is stopped midway while
2399 collecting. When the thread is later resumed, it finishes the
2400 collection, and returns to the jump pad, to execute the original
2401 instruction that was under the tracepoint jump at the time the
2402 older run had been started. If the jump pad had been rewritten
2403 since for something else in the new run, the thread would now
2404 execute the wrong / random instructions. */
2407 linux_stabilize_threads (void)
2409 struct thread_info
*save_inferior
;
2410 struct thread_info
*thread_stuck
;
2413 = (struct thread_info
*) find_inferior (&all_threads
,
2414 stuck_in_jump_pad_callback
,
2416 if (thread_stuck
!= NULL
)
2419 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2420 lwpid_of (thread_stuck
));
2424 save_inferior
= current_inferior
;
2426 stabilizing_threads
= 1;
2429 for_each_inferior (&all_threads
, move_out_of_jump_pad_callback
);
2431 /* Loop until all are stopped out of the jump pads. */
2432 while (find_inferior (&all_threads
, lwp_running
, NULL
) != NULL
)
2434 struct target_waitstatus ourstatus
;
2435 struct lwp_info
*lwp
;
2438 /* Note that we go through the full wait even loop. While
2439 moving threads out of jump pad, we need to be able to step
2440 over internal breakpoints and such. */
2441 linux_wait_1 (minus_one_ptid
, &ourstatus
, 0);
2443 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2445 lwp
= get_thread_lwp (current_inferior
);
2450 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2451 || current_inferior
->last_resume_kind
== resume_stop
)
2453 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2454 enqueue_one_deferred_signal (lwp
, &wstat
);
2459 find_inferior (&all_threads
, unsuspend_one_lwp
, NULL
);
2461 stabilizing_threads
= 0;
2463 current_inferior
= save_inferior
;
2468 = (struct thread_info
*) find_inferior (&all_threads
,
2469 stuck_in_jump_pad_callback
,
2471 if (thread_stuck
!= NULL
)
2472 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2473 lwpid_of (thread_stuck
));
2477 /* Wait for process, returns status. */
2480 linux_wait_1 (ptid_t ptid
,
2481 struct target_waitstatus
*ourstatus
, int target_options
)
2484 struct lwp_info
*event_child
;
2487 int step_over_finished
;
2488 int bp_explains_trap
;
2489 int maybe_internal_trap
;
2497 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid
));
2500 /* Translate generic target options into linux options. */
2502 if (target_options
& TARGET_WNOHANG
)
2506 bp_explains_trap
= 0;
2509 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2511 /* If we were only supposed to resume one thread, only wait for
2512 that thread - if it's still alive. If it died, however - which
2513 can happen if we're coming from the thread death case below -
2514 then we need to make sure we restart the other threads. We could
2515 pick a thread at random or restart all; restarting all is less
2518 && !ptid_equal (cont_thread
, null_ptid
)
2519 && !ptid_equal (cont_thread
, minus_one_ptid
))
2521 struct thread_info
*thread
;
2523 thread
= (struct thread_info
*) find_inferior_id (&all_threads
,
2526 /* No stepping, no signal - unless one is pending already, of course. */
2529 struct thread_resume resume_info
;
2530 resume_info
.thread
= minus_one_ptid
;
2531 resume_info
.kind
= resume_continue
;
2532 resume_info
.sig
= 0;
2533 linux_resume (&resume_info
, 1);
2539 if (ptid_equal (step_over_bkpt
, null_ptid
))
2540 pid
= linux_wait_for_event (ptid
, &w
, options
);
2544 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2545 target_pid_to_str (step_over_bkpt
));
2546 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
2551 gdb_assert (target_options
& TARGET_WNOHANG
);
2555 debug_printf ("linux_wait_1 ret = null_ptid, "
2556 "TARGET_WAITKIND_IGNORE\n");
2560 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2567 debug_printf ("linux_wait_1 ret = null_ptid, "
2568 "TARGET_WAITKIND_NO_RESUMED\n");
2572 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
2576 event_child
= get_thread_lwp (current_inferior
);
2578 /* linux_wait_for_event only returns an exit status for the last
2579 child of a process. Report it. */
2580 if (WIFEXITED (w
) || WIFSIGNALED (w
))
2584 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
2585 ourstatus
->value
.integer
= WEXITSTATUS (w
);
2589 debug_printf ("linux_wait_1 ret = %s, exited with "
2591 target_pid_to_str (ptid_of (current_inferior
)),
2598 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
2599 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
2603 debug_printf ("linux_wait_1 ret = %s, terminated with "
2605 target_pid_to_str (ptid_of (current_inferior
)),
2611 return ptid_of (current_inferior
);
2614 /* If this event was not handled before, and is not a SIGTRAP, we
2615 report it. SIGILL and SIGSEGV are also treated as traps in case
2616 a breakpoint is inserted at the current PC. If this target does
2617 not support internal breakpoints at all, we also report the
2618 SIGTRAP without further processing; it's of no concern to us. */
2620 = (supports_breakpoints ()
2621 && (WSTOPSIG (w
) == SIGTRAP
2622 || ((WSTOPSIG (w
) == SIGILL
2623 || WSTOPSIG (w
) == SIGSEGV
)
2624 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
2626 if (maybe_internal_trap
)
2628 /* Handle anything that requires bookkeeping before deciding to
2629 report the event or continue waiting. */
2631 /* First check if we can explain the SIGTRAP with an internal
2632 breakpoint, or if we should possibly report the event to GDB.
2633 Do this before anything that may remove or insert a
2635 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
2637 /* We have a SIGTRAP, possibly a step-over dance has just
2638 finished. If so, tweak the state machine accordingly,
2639 reinsert breakpoints and delete any reinsert (software
2640 single-step) breakpoints. */
2641 step_over_finished
= finish_step_over (event_child
);
2643 /* Now invoke the callbacks of any internal breakpoints there. */
2644 check_breakpoints (event_child
->stop_pc
);
2646 /* Handle tracepoint data collecting. This may overflow the
2647 trace buffer, and cause a tracing stop, removing
2649 trace_event
= handle_tracepoints (event_child
);
2651 if (bp_explains_trap
)
2653 /* If we stepped or ran into an internal breakpoint, we've
2654 already handled it. So next time we resume (from this
2655 PC), we should step over it. */
2657 debug_printf ("Hit a gdbserver breakpoint.\n");
2659 if (breakpoint_here (event_child
->stop_pc
))
2660 event_child
->need_step_over
= 1;
2665 /* We have some other signal, possibly a step-over dance was in
2666 progress, and it should be cancelled too. */
2667 step_over_finished
= finish_step_over (event_child
);
2670 /* We have all the data we need. Either report the event to GDB, or
2671 resume threads and keep waiting for more. */
2673 /* If we're collecting a fast tracepoint, finish the collection and
2674 move out of the jump pad before delivering a signal. See
2675 linux_stabilize_threads. */
2678 && WSTOPSIG (w
) != SIGTRAP
2679 && supports_fast_tracepoints ()
2680 && agent_loaded_p ())
2683 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2684 "to defer or adjust it.\n",
2685 WSTOPSIG (w
), lwpid_of (current_inferior
));
2687 /* Allow debugging the jump pad itself. */
2688 if (current_inferior
->last_resume_kind
!= resume_step
2689 && maybe_move_out_of_jump_pad (event_child
, &w
))
2691 enqueue_one_deferred_signal (event_child
, &w
);
2694 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2695 WSTOPSIG (w
), lwpid_of (current_inferior
));
2697 linux_resume_one_lwp (event_child
, 0, 0, NULL
);
2702 if (event_child
->collecting_fast_tracepoint
)
2705 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2706 "Check if we're already there.\n",
2707 lwpid_of (current_inferior
),
2708 event_child
->collecting_fast_tracepoint
);
2712 event_child
->collecting_fast_tracepoint
2713 = linux_fast_tracepoint_collecting (event_child
, NULL
);
2715 if (event_child
->collecting_fast_tracepoint
!= 1)
2717 /* No longer need this breakpoint. */
2718 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
2721 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2722 "stopping all threads momentarily.\n");
2724 /* Other running threads could hit this breakpoint.
2725 We don't handle moribund locations like GDB does,
2726 instead we always pause all threads when removing
2727 breakpoints, so that any step-over or
2728 decr_pc_after_break adjustment is always taken
2729 care of while the breakpoint is still
2731 stop_all_lwps (1, event_child
);
2732 cancel_breakpoints ();
2734 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
2735 event_child
->exit_jump_pad_bkpt
= NULL
;
2737 unstop_all_lwps (1, event_child
);
2739 gdb_assert (event_child
->suspended
>= 0);
2743 if (event_child
->collecting_fast_tracepoint
== 0)
2746 debug_printf ("fast tracepoint finished "
2747 "collecting successfully.\n");
2749 /* We may have a deferred signal to report. */
2750 if (dequeue_one_deferred_signal (event_child
, &w
))
2753 debug_printf ("dequeued one signal.\n");
2758 debug_printf ("no deferred signals.\n");
2760 if (stabilizing_threads
)
2762 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
2763 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
2767 debug_printf ("linux_wait_1 ret = %s, stopped "
2768 "while stabilizing threads\n",
2769 target_pid_to_str (ptid_of (current_inferior
)));
2773 return ptid_of (current_inferior
);
2779 /* Check whether GDB would be interested in this event. */
2781 /* If GDB is not interested in this signal, don't stop other
2782 threads, and don't report it to GDB. Just resume the inferior
2783 right away. We do this for threading-related signals as well as
2784 any that GDB specifically requested we ignore. But never ignore
2785 SIGSTOP if we sent it ourselves, and do not ignore signals when
2786 stepping - they may require special handling to skip the signal
2788 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2791 && current_inferior
->last_resume_kind
!= resume_step
2793 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2794 (current_process ()->private->thread_db
!= NULL
2795 && (WSTOPSIG (w
) == __SIGRTMIN
2796 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
2799 (pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
2800 && !(WSTOPSIG (w
) == SIGSTOP
2801 && current_inferior
->last_resume_kind
== resume_stop
))))
2803 siginfo_t info
, *info_p
;
2806 debug_printf ("Ignored signal %d for LWP %ld.\n",
2807 WSTOPSIG (w
), lwpid_of (current_inferior
));
2809 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_inferior
),
2810 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
2814 linux_resume_one_lwp (event_child
, event_child
->stepping
,
2815 WSTOPSIG (w
), info_p
);
2819 /* Note that all addresses are always "out of the step range" when
2820 there's no range to begin with. */
2821 in_step_range
= lwp_in_step_range (event_child
);
2823 /* If GDB wanted this thread to single step, and the thread is out
2824 of the step range, we always want to report the SIGTRAP, and let
2825 GDB handle it. Watchpoints should always be reported. So should
2826 signals we can't explain. A SIGTRAP we can't explain could be a
2827 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2828 do, we're be able to handle GDB breakpoints on top of internal
2829 breakpoints, by handling the internal breakpoint and still
2830 reporting the event to GDB. If we don't, we're out of luck, GDB
2831 won't see the breakpoint hit. */
2832 report_to_gdb
= (!maybe_internal_trap
2833 || (current_inferior
->last_resume_kind
== resume_step
2835 || event_child
->stopped_by_watchpoint
2836 || (!step_over_finished
&& !in_step_range
2837 && !bp_explains_trap
&& !trace_event
)
2838 || (gdb_breakpoint_here (event_child
->stop_pc
)
2839 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
2840 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
)));
2842 run_breakpoint_commands (event_child
->stop_pc
);
2844 /* We found no reason GDB would want us to stop. We either hit one
2845 of our own breakpoints, or finished an internal step GDB
2846 shouldn't know about. */
2851 if (bp_explains_trap
)
2852 debug_printf ("Hit a gdbserver breakpoint.\n");
2853 if (step_over_finished
)
2854 debug_printf ("Step-over finished.\n");
2856 debug_printf ("Tracepoint event.\n");
2857 if (lwp_in_step_range (event_child
))
2858 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2859 paddress (event_child
->stop_pc
),
2860 paddress (event_child
->step_range_start
),
2861 paddress (event_child
->step_range_end
));
2864 /* We're not reporting this breakpoint to GDB, so apply the
2865 decr_pc_after_break adjustment to the inferior's regcache
2868 if (the_low_target
.set_pc
!= NULL
)
2870 struct regcache
*regcache
2871 = get_thread_regcache (current_inferior
, 1);
2872 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
2875 /* We may have finished stepping over a breakpoint. If so,
2876 we've stopped and suspended all LWPs momentarily except the
2877 stepping one. This is where we resume them all again. We're
2878 going to keep waiting, so use proceed, which handles stepping
2879 over the next breakpoint. */
2881 debug_printf ("proceeding all threads.\n");
2883 if (step_over_finished
)
2884 unsuspend_all_lwps (event_child
);
2886 proceed_all_lwps ();
2892 if (current_inferior
->last_resume_kind
== resume_step
)
2894 if (event_child
->step_range_start
== event_child
->step_range_end
)
2895 debug_printf ("GDB wanted to single-step, reporting event.\n");
2896 else if (!lwp_in_step_range (event_child
))
2897 debug_printf ("Out of step range, reporting event.\n");
2899 if (event_child
->stopped_by_watchpoint
)
2900 debug_printf ("Stopped by watchpoint.\n");
2901 if (gdb_breakpoint_here (event_child
->stop_pc
))
2902 debug_printf ("Stopped by GDB breakpoint.\n");
2904 debug_printf ("Hit a non-gdbserver trap event.\n");
2907 /* Alright, we're going to report a stop. */
2909 if (!non_stop
&& !stabilizing_threads
)
2911 /* In all-stop, stop all threads. */
2912 stop_all_lwps (0, NULL
);
2914 /* If we're not waiting for a specific LWP, choose an event LWP
2915 from among those that have had events. Giving equal priority
2916 to all LWPs that have had events helps prevent
2918 if (ptid_equal (ptid
, minus_one_ptid
))
2920 event_child
->status_pending_p
= 1;
2921 event_child
->status_pending
= w
;
2923 select_event_lwp (&event_child
);
2925 /* current_inferior and event_child must stay in sync. */
2926 current_inferior
= get_lwp_thread (event_child
);
2928 event_child
->status_pending_p
= 0;
2929 w
= event_child
->status_pending
;
2932 /* Now that we've selected our final event LWP, cancel any
2933 breakpoints in other LWPs that have hit a GDB breakpoint.
2934 See the comment in cancel_breakpoints_callback to find out
2936 find_inferior (&all_threads
, cancel_breakpoints_callback
, event_child
);
2938 /* If we were going a step-over, all other threads but the stepping one
2939 had been paused in start_step_over, with their suspend counts
2940 incremented. We don't want to do a full unstop/unpause, because we're
2941 in all-stop mode (so we want threads stopped), but we still need to
2942 unsuspend the other threads, to decrement their `suspended' count
2944 if (step_over_finished
)
2945 unsuspend_all_lwps (event_child
);
2947 /* Stabilize threads (move out of jump pads). */
2948 stabilize_threads ();
2952 /* If we just finished a step-over, then all threads had been
2953 momentarily paused. In all-stop, that's fine, we want
2954 threads stopped by now anyway. In non-stop, we need to
2955 re-resume threads that GDB wanted to be running. */
2956 if (step_over_finished
)
2957 unstop_all_lwps (1, event_child
);
2960 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
2962 if (current_inferior
->last_resume_kind
== resume_stop
2963 && WSTOPSIG (w
) == SIGSTOP
)
2965 /* A thread that has been requested to stop by GDB with vCont;t,
2966 and it stopped cleanly, so report as SIG0. The use of
2967 SIGSTOP is an implementation detail. */
2968 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
2970 else if (current_inferior
->last_resume_kind
== resume_stop
2971 && WSTOPSIG (w
) != SIGSTOP
)
2973 /* A thread that has been requested to stop by GDB with vCont;t,
2974 but, it stopped for other reasons. */
2975 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
2979 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
2982 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
2986 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2987 target_pid_to_str (ptid_of (current_inferior
)),
2988 ourstatus
->kind
, ourstatus
->value
.sig
);
2992 return ptid_of (current_inferior
);
2995 /* Get rid of any pending event in the pipe. */
2997 async_file_flush (void)
3003 ret
= read (linux_event_pipe
[0], &buf
, 1);
3004 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3007 /* Put something in the pipe, so the event loop wakes up. */
3009 async_file_mark (void)
3013 async_file_flush ();
3016 ret
= write (linux_event_pipe
[1], "+", 1);
3017 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3019 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3020 be awakened anyway. */
3024 linux_wait (ptid_t ptid
,
3025 struct target_waitstatus
*ourstatus
, int target_options
)
3029 /* Flush the async file first. */
3030 if (target_is_async_p ())
3031 async_file_flush ();
3033 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
3035 /* If at least one stop was reported, there may be more. A single
3036 SIGCHLD can signal more than one child stop. */
3037 if (target_is_async_p ()
3038 && (target_options
& TARGET_WNOHANG
) != 0
3039 && !ptid_equal (event_ptid
, null_ptid
))
3045 /* Send a signal to an LWP. */
3048 kill_lwp (unsigned long lwpid
, int signo
)
3050 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3051 fails, then we are not using nptl threads and we should be using kill. */
3055 static int tkill_failed
;
3062 ret
= syscall (__NR_tkill
, lwpid
, signo
);
3063 if (errno
!= ENOSYS
)
3070 return kill (lwpid
, signo
);
3074 linux_stop_lwp (struct lwp_info
*lwp
)
3080 send_sigstop (struct lwp_info
*lwp
)
3084 pid
= lwpid_of (get_lwp_thread (lwp
));
3086 /* If we already have a pending stop signal for this process, don't
3088 if (lwp
->stop_expected
)
3091 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3097 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3099 lwp
->stop_expected
= 1;
3100 kill_lwp (pid
, SIGSTOP
);
3104 send_sigstop_callback (struct inferior_list_entry
*entry
, void *except
)
3106 struct thread_info
*thread
= (struct thread_info
*) entry
;
3107 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3109 /* Ignore EXCEPT. */
3120 /* Increment the suspend count of an LWP, and stop it, if not stopped
3123 suspend_and_send_sigstop_callback (struct inferior_list_entry
*entry
,
3126 struct thread_info
*thread
= (struct thread_info
*) entry
;
3127 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3129 /* Ignore EXCEPT. */
3135 return send_sigstop_callback (entry
, except
);
3139 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3141 /* It's dead, really. */
3144 /* Store the exit status for later. */
3145 lwp
->status_pending_p
= 1;
3146 lwp
->status_pending
= wstat
;
3148 /* Prevent trying to stop it. */
3151 /* No further stops are expected from a dead lwp. */
3152 lwp
->stop_expected
= 0;
3155 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3158 wait_for_sigstop (void)
3160 struct thread_info
*saved_inferior
;
3165 saved_inferior
= current_inferior
;
3166 if (saved_inferior
!= NULL
)
3167 saved_tid
= saved_inferior
->entry
.id
;
3169 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3172 debug_printf ("wait_for_sigstop: pulling events\n");
3174 /* Passing NULL_PTID as filter indicates we want all events to be
3175 left pending. Eventually this returns when there are no
3176 unwaited-for children left. */
3177 ret
= linux_wait_for_event_filtered (minus_one_ptid
, null_ptid
,
3179 gdb_assert (ret
== -1);
3181 if (saved_inferior
== NULL
|| linux_thread_alive (saved_tid
))
3182 current_inferior
= saved_inferior
;
3186 debug_printf ("Previously current thread died.\n");
3190 /* We can't change the current inferior behind GDB's back,
3191 otherwise, a subsequent command may apply to the wrong
3193 current_inferior
= NULL
;
3197 /* Set a valid thread as current. */
3198 set_desired_inferior (0);
3203 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3204 move it out, because we need to report the stop event to GDB. For
3205 example, if the user puts a breakpoint in the jump pad, it's
3206 because she wants to debug it. */
3209 stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
, void *data
)
3211 struct thread_info
*thread
= (struct thread_info
*) entry
;
3212 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3214 gdb_assert (lwp
->suspended
== 0);
3215 gdb_assert (lwp
->stopped
);
3217 /* Allow debugging the jump pad, gdb_collect, etc.. */
3218 return (supports_fast_tracepoints ()
3219 && agent_loaded_p ()
3220 && (gdb_breakpoint_here (lwp
->stop_pc
)
3221 || lwp
->stopped_by_watchpoint
3222 || thread
->last_resume_kind
== resume_step
)
3223 && linux_fast_tracepoint_collecting (lwp
, NULL
));
3227 move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
)
3229 struct thread_info
*thread
= (struct thread_info
*) entry
;
3230 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3233 gdb_assert (lwp
->suspended
== 0);
3234 gdb_assert (lwp
->stopped
);
3236 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3238 /* Allow debugging the jump pad, gdb_collect, etc. */
3239 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3240 && !lwp
->stopped_by_watchpoint
3241 && thread
->last_resume_kind
!= resume_step
3242 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3245 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3250 lwp
->status_pending_p
= 0;
3251 enqueue_one_deferred_signal (lwp
, wstat
);
3254 debug_printf ("Signal %d for LWP %ld deferred "
3256 WSTOPSIG (*wstat
), lwpid_of (thread
));
3259 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
3266 lwp_running (struct inferior_list_entry
*entry
, void *data
)
3268 struct thread_info
*thread
= (struct thread_info
*) entry
;
3269 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3278 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3279 If SUSPEND, then also increase the suspend count of every LWP,
3283 stop_all_lwps (int suspend
, struct lwp_info
*except
)
3285 /* Should not be called recursively. */
3286 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
3291 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3292 suspend
? "stop-and-suspend" : "stop",
3294 ? target_pid_to_str (ptid_of (get_lwp_thread (except
)))
3298 stopping_threads
= (suspend
3299 ? STOPPING_AND_SUSPENDING_THREADS
3300 : STOPPING_THREADS
);
3303 find_inferior (&all_threads
, suspend_and_send_sigstop_callback
, except
);
3305 find_inferior (&all_threads
, send_sigstop_callback
, except
);
3306 wait_for_sigstop ();
3307 stopping_threads
= NOT_STOPPING_THREADS
;
3311 debug_printf ("stop_all_lwps done, setting stopping_threads "
3312 "back to !stopping\n");
3317 /* Resume execution of the inferior process.
3318 If STEP is nonzero, single-step it.
3319 If SIGNAL is nonzero, give it that signal. */
3322 linux_resume_one_lwp (struct lwp_info
*lwp
,
3323 int step
, int signal
, siginfo_t
*info
)
3325 struct thread_info
*thread
= get_lwp_thread (lwp
);
3326 struct thread_info
*saved_inferior
;
3327 int fast_tp_collecting
;
3329 if (lwp
->stopped
== 0)
3332 fast_tp_collecting
= lwp
->collecting_fast_tracepoint
;
3334 gdb_assert (!stabilizing_threads
|| fast_tp_collecting
);
3336 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3337 user used the "jump" command, or "set $pc = foo"). */
3338 if (lwp
->stop_pc
!= get_pc (lwp
))
3340 /* Collecting 'while-stepping' actions doesn't make sense
3342 release_while_stepping_state_list (thread
);
3345 /* If we have pending signals or status, and a new signal, enqueue the
3346 signal. Also enqueue the signal if we are waiting to reinsert a
3347 breakpoint; it will be picked up again below. */
3349 && (lwp
->status_pending_p
3350 || lwp
->pending_signals
!= NULL
3351 || lwp
->bp_reinsert
!= 0
3352 || fast_tp_collecting
))
3354 struct pending_signals
*p_sig
;
3355 p_sig
= xmalloc (sizeof (*p_sig
));
3356 p_sig
->prev
= lwp
->pending_signals
;
3357 p_sig
->signal
= signal
;
3359 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3361 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
3362 lwp
->pending_signals
= p_sig
;
3365 if (lwp
->status_pending_p
)
3368 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3369 " has pending status\n",
3370 lwpid_of (thread
), step
? "step" : "continue", signal
,
3371 lwp
->stop_expected
? "expected" : "not expected");
3375 saved_inferior
= current_inferior
;
3376 current_inferior
= thread
;
3379 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3380 lwpid_of (thread
), step
? "step" : "continue", signal
,
3381 lwp
->stop_expected
? "expected" : "not expected");
3383 /* This bit needs some thinking about. If we get a signal that
3384 we must report while a single-step reinsert is still pending,
3385 we often end up resuming the thread. It might be better to
3386 (ew) allow a stack of pending events; then we could be sure that
3387 the reinsert happened right away and not lose any signals.
3389 Making this stack would also shrink the window in which breakpoints are
3390 uninserted (see comment in linux_wait_for_lwp) but not enough for
3391 complete correctness, so it won't solve that problem. It may be
3392 worthwhile just to solve this one, however. */
3393 if (lwp
->bp_reinsert
!= 0)
3396 debug_printf (" pending reinsert at 0x%s\n",
3397 paddress (lwp
->bp_reinsert
));
3399 if (can_hardware_single_step ())
3401 if (fast_tp_collecting
== 0)
3404 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
3406 fprintf (stderr
, "BAD - reinserting and suspended(%d).\n",
3413 /* Postpone any pending signal. It was enqueued above. */
3417 if (fast_tp_collecting
== 1)
3420 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3421 " (exit-jump-pad-bkpt)\n",
3424 /* Postpone any pending signal. It was enqueued above. */
3427 else if (fast_tp_collecting
== 2)
3430 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3431 " single-stepping\n",
3434 if (can_hardware_single_step ())
3437 fatal ("moving out of jump pad single-stepping"
3438 " not implemented on this target");
3440 /* Postpone any pending signal. It was enqueued above. */
3444 /* If we have while-stepping actions in this thread set it stepping.
3445 If we have a signal to deliver, it may or may not be set to
3446 SIG_IGN, we don't know. Assume so, and allow collecting
3447 while-stepping into a signal handler. A possible smart thing to
3448 do would be to set an internal breakpoint at the signal return
3449 address, continue, and carry on catching this while-stepping
3450 action only when that breakpoint is hit. A future
3452 if (thread
->while_stepping
!= NULL
3453 && can_hardware_single_step ())
3456 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3461 if (debug_threads
&& the_low_target
.get_pc
!= NULL
)
3463 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 1);
3464 CORE_ADDR pc
= (*the_low_target
.get_pc
) (regcache
);
3465 debug_printf (" resuming from pc 0x%lx\n", (long) pc
);
3468 /* If we have pending signals, consume one unless we are trying to
3469 reinsert a breakpoint or we're trying to finish a fast tracepoint
3471 if (lwp
->pending_signals
!= NULL
3472 && lwp
->bp_reinsert
== 0
3473 && fast_tp_collecting
== 0)
3475 struct pending_signals
**p_sig
;
3477 p_sig
= &lwp
->pending_signals
;
3478 while ((*p_sig
)->prev
!= NULL
)
3479 p_sig
= &(*p_sig
)->prev
;
3481 signal
= (*p_sig
)->signal
;
3482 if ((*p_sig
)->info
.si_signo
!= 0)
3483 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
3490 if (the_low_target
.prepare_to_resume
!= NULL
)
3491 the_low_target
.prepare_to_resume (lwp
);
3493 regcache_invalidate_thread (thread
);
3496 lwp
->stopped_by_watchpoint
= 0;
3497 lwp
->stepping
= step
;
3498 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (thread
),
3499 (PTRACE_TYPE_ARG3
) 0,
3500 /* Coerce to a uintptr_t first to avoid potential gcc warning
3501 of coercing an 8 byte integer to a 4 byte pointer. */
3502 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
3504 current_inferior
= saved_inferior
;
3507 /* ESRCH from ptrace either means that the thread was already
3508 running (an error) or that it is gone (a race condition). If
3509 it's gone, we will get a notification the next time we wait,
3510 so we can ignore the error. We could differentiate these
3511 two, but it's tricky without waiting; the thread still exists
3512 as a zombie, so sending it signal 0 would succeed. So just
3517 perror_with_name ("ptrace");
3521 struct thread_resume_array
3523 struct thread_resume
*resume
;
3527 /* This function is called once per thread via find_inferior.
3528 ARG is a pointer to a thread_resume_array struct.
3529 We look up the thread specified by ENTRY in ARG, and mark the thread
3530 with a pointer to the appropriate resume request.
3532 This algorithm is O(threads * resume elements), but resume elements
3533 is small (and will remain small at least until GDB supports thread
3537 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
3539 struct thread_info
*thread
= (struct thread_info
*) entry
;
3540 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3542 struct thread_resume_array
*r
;
3546 for (ndx
= 0; ndx
< r
->n
; ndx
++)
3548 ptid_t ptid
= r
->resume
[ndx
].thread
;
3549 if (ptid_equal (ptid
, minus_one_ptid
)
3550 || ptid_equal (ptid
, entry
->id
)
3551 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3553 || (ptid_get_pid (ptid
) == pid_of (thread
)
3554 && (ptid_is_pid (ptid
)
3555 || ptid_get_lwp (ptid
) == -1)))
3557 if (r
->resume
[ndx
].kind
== resume_stop
3558 && thread
->last_resume_kind
== resume_stop
)
3561 debug_printf ("already %s LWP %ld at GDB's request\n",
3562 (thread
->last_status
.kind
3563 == TARGET_WAITKIND_STOPPED
)
3571 lwp
->resume
= &r
->resume
[ndx
];
3572 thread
->last_resume_kind
= lwp
->resume
->kind
;
3574 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
3575 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
3577 /* If we had a deferred signal to report, dequeue one now.
3578 This can happen if LWP gets more than one signal while
3579 trying to get out of a jump pad. */
3581 && !lwp
->status_pending_p
3582 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
3584 lwp
->status_pending_p
= 1;
3587 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3588 "leaving status pending.\n",
3589 WSTOPSIG (lwp
->status_pending
),
3597 /* No resume action for this thread. */
3603 /* find_inferior callback for linux_resume.
3604 Set *FLAG_P if this lwp has an interesting status pending. */
3607 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
3609 struct thread_info
*thread
= (struct thread_info
*) entry
;
3610 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3612 /* LWPs which will not be resumed are not interesting, because
3613 we might not wait for them next time through linux_wait. */
3614 if (lwp
->resume
== NULL
)
3617 if (lwp
->status_pending_p
)
3618 * (int *) flag_p
= 1;
3623 /* Return 1 if this lwp that GDB wants running is stopped at an
3624 internal breakpoint that we need to step over. It assumes that any
3625 required STOP_PC adjustment has already been propagated to the
3626 inferior's regcache. */
3629 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
3631 struct thread_info
*thread
= (struct thread_info
*) entry
;
3632 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3633 struct thread_info
*saved_inferior
;
3636 /* LWPs which will not be resumed are not interesting, because we
3637 might not wait for them next time through linux_wait. */
3642 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3647 if (thread
->last_resume_kind
== resume_stop
)
3650 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3656 gdb_assert (lwp
->suspended
>= 0);
3661 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3666 if (!lwp
->need_step_over
)
3669 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread
));
3672 if (lwp
->status_pending_p
)
3675 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3681 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3685 /* If the PC has changed since we stopped, then don't do anything,
3686 and let the breakpoint/tracepoint be hit. This happens if, for
3687 instance, GDB handled the decr_pc_after_break subtraction itself,
3688 GDB is OOL stepping this thread, or the user has issued a "jump"
3689 command, or poked thread's registers herself. */
3690 if (pc
!= lwp
->stop_pc
)
3693 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3694 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3696 paddress (lwp
->stop_pc
), paddress (pc
));
3698 lwp
->need_step_over
= 0;
3702 saved_inferior
= current_inferior
;
3703 current_inferior
= thread
;
3705 /* We can only step over breakpoints we know about. */
3706 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
3708 /* Don't step over a breakpoint that GDB expects to hit
3709 though. If the condition is being evaluated on the target's side
3710 and it evaluate to false, step over this breakpoint as well. */
3711 if (gdb_breakpoint_here (pc
)
3712 && gdb_condition_true_at_breakpoint (pc
)
3713 && gdb_no_commands_at_breakpoint (pc
))
3716 debug_printf ("Need step over [LWP %ld]? yes, but found"
3717 " GDB breakpoint at 0x%s; skipping step over\n",
3718 lwpid_of (thread
), paddress (pc
));
3720 current_inferior
= saved_inferior
;
3726 debug_printf ("Need step over [LWP %ld]? yes, "
3727 "found breakpoint at 0x%s\n",
3728 lwpid_of (thread
), paddress (pc
));
3730 /* We've found an lwp that needs stepping over --- return 1 so
3731 that find_inferior stops looking. */
3732 current_inferior
= saved_inferior
;
3734 /* If the step over is cancelled, this is set again. */
3735 lwp
->need_step_over
= 0;
3740 current_inferior
= saved_inferior
;
3743 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3745 lwpid_of (thread
), paddress (pc
));
3750 /* Start a step-over operation on LWP. When LWP stopped at a
3751 breakpoint, to make progress, we need to remove the breakpoint out
3752 of the way. If we let other threads run while we do that, they may
3753 pass by the breakpoint location and miss hitting it. To avoid
3754 that, a step-over momentarily stops all threads while LWP is
3755 single-stepped while the breakpoint is temporarily uninserted from
3756 the inferior. When the single-step finishes, we reinsert the
3757 breakpoint, and let all threads that are supposed to be running,
3760 On targets that don't support hardware single-step, we don't
3761 currently support full software single-stepping. Instead, we only
3762 support stepping over the thread event breakpoint, by asking the
3763 low target where to place a reinsert breakpoint. Since this
3764 routine assumes the breakpoint being stepped over is a thread event
3765 breakpoint, it usually assumes the return address of the current
3766 function is a good enough place to set the reinsert breakpoint. */
3769 start_step_over (struct lwp_info
*lwp
)
3771 struct thread_info
*thread
= get_lwp_thread (lwp
);
3772 struct thread_info
*saved_inferior
;
3777 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3780 stop_all_lwps (1, lwp
);
3781 gdb_assert (lwp
->suspended
== 0);
3784 debug_printf ("Done stopping all threads for step-over.\n");
3786 /* Note, we should always reach here with an already adjusted PC,
3787 either by GDB (if we're resuming due to GDB's request), or by our
3788 caller, if we just finished handling an internal breakpoint GDB
3789 shouldn't care about. */
3792 saved_inferior
= current_inferior
;
3793 current_inferior
= thread
;
3795 lwp
->bp_reinsert
= pc
;
3796 uninsert_breakpoints_at (pc
);
3797 uninsert_fast_tracepoint_jumps_at (pc
);
3799 if (can_hardware_single_step ())
3805 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
3806 set_reinsert_breakpoint (raddr
);
3810 current_inferior
= saved_inferior
;
3812 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
3814 /* Require next event from this LWP. */
3815 step_over_bkpt
= thread
->entry
.id
;
3819 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3820 start_step_over, if still there, and delete any reinsert
3821 breakpoints we've set, on non hardware single-step targets. */
3824 finish_step_over (struct lwp_info
*lwp
)
3826 if (lwp
->bp_reinsert
!= 0)
3829 debug_printf ("Finished step over.\n");
3831 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3832 may be no breakpoint to reinsert there by now. */
3833 reinsert_breakpoints_at (lwp
->bp_reinsert
);
3834 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
3836 lwp
->bp_reinsert
= 0;
3838 /* Delete any software-single-step reinsert breakpoints. No
3839 longer needed. We don't have to worry about other threads
3840 hitting this trap, and later not being able to explain it,
3841 because we were stepping over a breakpoint, and we hold all
3842 threads but LWP stopped while doing that. */
3843 if (!can_hardware_single_step ())
3844 delete_reinsert_breakpoints ();
3846 step_over_bkpt
= null_ptid
;
3853 /* This function is called once per thread. We check the thread's resume
3854 request, which will tell us whether to resume, step, or leave the thread
3855 stopped; and what signal, if any, it should be sent.
3857 For threads which we aren't explicitly told otherwise, we preserve
3858 the stepping flag; this is used for stepping over gdbserver-placed
3861 If pending_flags was set in any thread, we queue any needed
3862 signals, since we won't actually resume. We already have a pending
3863 event to report, so we don't need to preserve any step requests;
3864 they should be re-issued if necessary. */
3867 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
3869 struct thread_info
*thread
= (struct thread_info
*) entry
;
3870 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3872 int leave_all_stopped
= * (int *) arg
;
3875 if (lwp
->resume
== NULL
)
3878 if (lwp
->resume
->kind
== resume_stop
)
3881 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
3886 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
3888 /* Stop the thread, and wait for the event asynchronously,
3889 through the event loop. */
3895 debug_printf ("already stopped LWP %ld\n",
3898 /* The LWP may have been stopped in an internal event that
3899 was not meant to be notified back to GDB (e.g., gdbserver
3900 breakpoint), so we should be reporting a stop event in
3903 /* If the thread already has a pending SIGSTOP, this is a
3904 no-op. Otherwise, something later will presumably resume
3905 the thread and this will cause it to cancel any pending
3906 operation, due to last_resume_kind == resume_stop. If
3907 the thread already has a pending status to report, we
3908 will still report it the next time we wait - see
3909 status_pending_p_callback. */
3911 /* If we already have a pending signal to report, then
3912 there's no need to queue a SIGSTOP, as this means we're
3913 midway through moving the LWP out of the jumppad, and we
3914 will report the pending signal as soon as that is
3916 if (lwp
->pending_signals_to_report
== NULL
)
3920 /* For stop requests, we're done. */
3922 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
3926 /* If this thread which is about to be resumed has a pending status,
3927 then don't resume any threads - we can just report the pending
3928 status. Make sure to queue any signals that would otherwise be
3929 sent. In all-stop mode, we do this decision based on if *any*
3930 thread has a pending status. If there's a thread that needs the
3931 step-over-breakpoint dance, then don't resume any other thread
3932 but that particular one. */
3933 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
3938 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
3940 step
= (lwp
->resume
->kind
== resume_step
);
3941 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
3946 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
3948 /* If we have a new signal, enqueue the signal. */
3949 if (lwp
->resume
->sig
!= 0)
3951 struct pending_signals
*p_sig
;
3952 p_sig
= xmalloc (sizeof (*p_sig
));
3953 p_sig
->prev
= lwp
->pending_signals
;
3954 p_sig
->signal
= lwp
->resume
->sig
;
3955 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3957 /* If this is the same signal we were previously stopped by,
3958 make sure to queue its siginfo. We can ignore the return
3959 value of ptrace; if it fails, we'll skip
3960 PTRACE_SETSIGINFO. */
3961 if (WIFSTOPPED (lwp
->last_status
)
3962 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
3963 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
3966 lwp
->pending_signals
= p_sig
;
3970 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
3976 linux_resume (struct thread_resume
*resume_info
, size_t n
)
3978 struct thread_resume_array array
= { resume_info
, n
};
3979 struct thread_info
*need_step_over
= NULL
;
3981 int leave_all_stopped
;
3986 debug_printf ("linux_resume:\n");
3989 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
3991 /* If there is a thread which would otherwise be resumed, which has
3992 a pending status, then don't resume any threads - we can just
3993 report the pending status. Make sure to queue any signals that
3994 would otherwise be sent. In non-stop mode, we'll apply this
3995 logic to each thread individually. We consume all pending events
3996 before considering to start a step-over (in all-stop). */
3999 find_inferior (&all_threads
, resume_status_pending_p
, &any_pending
);
4001 /* If there is a thread which would otherwise be resumed, which is
4002 stopped at a breakpoint that needs stepping over, then don't
4003 resume any threads - have it step over the breakpoint with all
4004 other threads stopped, then resume all threads again. Make sure
4005 to queue any signals that would otherwise be delivered or
4007 if (!any_pending
&& supports_breakpoints ())
4009 = (struct thread_info
*) find_inferior (&all_threads
,
4010 need_step_over_p
, NULL
);
4012 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4016 if (need_step_over
!= NULL
)
4017 debug_printf ("Not resuming all, need step over\n");
4018 else if (any_pending
)
4019 debug_printf ("Not resuming, all-stop and found "
4020 "an LWP with pending status\n");
4022 debug_printf ("Resuming, no pending status or step over needed\n");
4025 /* Even if we're leaving threads stopped, queue all signals we'd
4026 otherwise deliver. */
4027 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
4030 start_step_over (get_thread_lwp (need_step_over
));
4034 debug_printf ("linux_resume done\n");
4039 /* This function is called once per thread. We check the thread's
4040 last resume request, which will tell us whether to resume, step, or
4041 leave the thread stopped. Any signal the client requested to be
4042 delivered has already been enqueued at this point.
4044 If any thread that GDB wants running is stopped at an internal
4045 breakpoint that needs stepping over, we start a step-over operation
4046 on that particular thread, and leave all others stopped. */
4049 proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4051 struct thread_info
*thread
= (struct thread_info
*) entry
;
4052 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4059 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4064 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4068 if (thread
->last_resume_kind
== resume_stop
4069 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
4072 debug_printf (" client wants LWP to remain %ld stopped\n",
4077 if (lwp
->status_pending_p
)
4080 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4085 gdb_assert (lwp
->suspended
>= 0);
4090 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
4094 if (thread
->last_resume_kind
== resume_stop
4095 && lwp
->pending_signals_to_report
== NULL
4096 && lwp
->collecting_fast_tracepoint
== 0)
4098 /* We haven't reported this LWP as stopped yet (otherwise, the
4099 last_status.kind check above would catch it, and we wouldn't
4100 reach here. This LWP may have been momentarily paused by a
4101 stop_all_lwps call while handling for example, another LWP's
4102 step-over. In that case, the pending expected SIGSTOP signal
4103 that was queued at vCont;t handling time will have already
4104 been consumed by wait_for_sigstop, and so we need to requeue
4105 another one here. Note that if the LWP already has a SIGSTOP
4106 pending, this is a no-op. */
4109 debug_printf ("Client wants LWP %ld to stop. "
4110 "Making sure it has a SIGSTOP pending\n",
4116 step
= thread
->last_resume_kind
== resume_step
;
4117 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
4122 unsuspend_and_proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4124 struct thread_info
*thread
= (struct thread_info
*) entry
;
4125 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4131 gdb_assert (lwp
->suspended
>= 0);
4133 return proceed_one_lwp (entry
, except
);
4136 /* When we finish a step-over, set threads running again. If there's
4137 another thread that may need a step-over, now's the time to start
4138 it. Eventually, we'll move all threads past their breakpoints. */
4141 proceed_all_lwps (void)
4143 struct thread_info
*need_step_over
;
4145 /* If there is a thread which would otherwise be resumed, which is
4146 stopped at a breakpoint that needs stepping over, then don't
4147 resume any threads - have it step over the breakpoint with all
4148 other threads stopped, then resume all threads again. */
4150 if (supports_breakpoints ())
4153 = (struct thread_info
*) find_inferior (&all_threads
,
4154 need_step_over_p
, NULL
);
4156 if (need_step_over
!= NULL
)
4159 debug_printf ("proceed_all_lwps: found "
4160 "thread %ld needing a step-over\n",
4161 lwpid_of (need_step_over
));
4163 start_step_over (get_thread_lwp (need_step_over
));
4169 debug_printf ("Proceeding, no step-over needed\n");
4171 find_inferior (&all_threads
, proceed_one_lwp
, NULL
);
4174 /* Stopped LWPs that the client wanted to be running, that don't have
4175 pending statuses, are set to run again, except for EXCEPT, if not
4176 NULL. This undoes a stop_all_lwps call. */
4179 unstop_all_lwps (int unsuspend
, struct lwp_info
*except
)
4185 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4186 lwpid_of (get_lwp_thread (except
)));
4188 debug_printf ("unstopping all lwps\n");
4192 find_inferior (&all_threads
, unsuspend_and_proceed_one_lwp
, except
);
4194 find_inferior (&all_threads
, proceed_one_lwp
, except
);
4198 debug_printf ("unstop_all_lwps done\n");
4204 #ifdef HAVE_LINUX_REGSETS
4206 #define use_linux_regsets 1
4208 /* Returns true if REGSET has been disabled. */
4211 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
4213 return (info
->disabled_regsets
!= NULL
4214 && info
->disabled_regsets
[regset
- info
->regsets
]);
4217 /* Disable REGSET. */
4220 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
4224 dr_offset
= regset
- info
->regsets
;
4225 if (info
->disabled_regsets
== NULL
)
4226 info
->disabled_regsets
= xcalloc (1, info
->num_regsets
);
4227 info
->disabled_regsets
[dr_offset
] = 1;
4231 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
4232 struct regcache
*regcache
)
4234 struct regset_info
*regset
;
4235 int saw_general_regs
= 0;
4239 regset
= regsets_info
->regsets
;
4241 pid
= lwpid_of (current_inferior
);
4242 while (regset
->size
>= 0)
4247 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
4253 buf
= xmalloc (regset
->size
);
4255 nt_type
= regset
->nt_type
;
4259 iov
.iov_len
= regset
->size
;
4260 data
= (void *) &iov
;
4266 res
= ptrace (regset
->get_request
, pid
,
4267 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4269 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4275 /* If we get EIO on a regset, do not try it again for
4276 this process mode. */
4277 disable_regset (regsets_info
, regset
);
4284 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4289 else if (regset
->type
== GENERAL_REGS
)
4290 saw_general_regs
= 1;
4291 regset
->store_function (regcache
, buf
);
4295 if (saw_general_regs
)
4302 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
4303 struct regcache
*regcache
)
4305 struct regset_info
*regset
;
4306 int saw_general_regs
= 0;
4310 regset
= regsets_info
->regsets
;
4312 pid
= lwpid_of (current_inferior
);
4313 while (regset
->size
>= 0)
4318 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
4324 buf
= xmalloc (regset
->size
);
4326 /* First fill the buffer with the current register set contents,
4327 in case there are any items in the kernel's regset that are
4328 not in gdbserver's regcache. */
4330 nt_type
= regset
->nt_type
;
4334 iov
.iov_len
= regset
->size
;
4335 data
= (void *) &iov
;
4341 res
= ptrace (regset
->get_request
, pid
,
4342 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4344 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4349 /* Then overlay our cached registers on that. */
4350 regset
->fill_function (regcache
, buf
);
4352 /* Only now do we write the register set. */
4354 res
= ptrace (regset
->set_request
, pid
,
4355 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4357 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
4365 /* If we get EIO on a regset, do not try it again for
4366 this process mode. */
4367 disable_regset (regsets_info
, regset
);
4371 else if (errno
== ESRCH
)
4373 /* At this point, ESRCH should mean the process is
4374 already gone, in which case we simply ignore attempts
4375 to change its registers. See also the related
4376 comment in linux_resume_one_lwp. */
4382 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4385 else if (regset
->type
== GENERAL_REGS
)
4386 saw_general_regs
= 1;
4390 if (saw_general_regs
)
4396 #else /* !HAVE_LINUX_REGSETS */
4398 #define use_linux_regsets 0
4399 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4400 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4404 /* Return 1 if register REGNO is supported by one of the regset ptrace
4405 calls or 0 if it has to be transferred individually. */
4408 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
4410 unsigned char mask
= 1 << (regno
% 8);
4411 size_t index
= regno
/ 8;
4413 return (use_linux_regsets
4414 && (regs_info
->regset_bitmap
== NULL
4415 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
4418 #ifdef HAVE_LINUX_USRREGS
4421 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
4425 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
4426 error ("Invalid register number %d.", regnum
);
4428 addr
= usrregs
->regmap
[regnum
];
/* Fetch one register.  */

static void
fetch_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);

  pid = lwpid_of (current_inferior);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
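/* Worked example of the size rounding above (illustrative): with a
   10-byte register and sizeof (PTRACE_XFER_TYPE) == 4, the expression
   (10 + 4 - 1) & -4 == 13 & 0xfffffffc == 12, i.e. the buffer is
   padded up to a whole number of ptrace transfer words.  */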
/* Store one register.  */

static void
store_register (const struct usrregs_info *usrregs,
		struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if ((*the_low_target.cannot_store_register) (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  memset (buf, 0, size);

  if (the_low_target.collect_ptrace_register)
    the_low_target.collect_ptrace_register (regcache, regno, buf);
  else
    collect_register (regcache, regno, buf);

  pid = lwpid_of (current_inferior);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in linux_resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if ((*the_low_target.cannot_store_register) (regno) == 0)
	    error ("writing register %d: %s", regno, strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
/* Fetch all registers, or just one, from the child process.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been retrieved by regsets_fetch_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */

static void
usr_fetch_inferior_registers (const struct regs_info *regs_info,
			      struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
}
/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers, skipping any that are
   assumed to have been saved by regsets_store_inferior_registers,
   unless ALL is non-zero.
   Otherwise, REGNO specifies which register (so we can save time).  */

static void
usr_store_inferior_registers (const struct regs_info *regs_info,
			      struct regcache *regcache, int regno, int all)
{
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
}
#else /* !HAVE_LINUX_USRREGS */

#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)

#endif
static void
linux_fetch_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      if (the_low_target.fetch_register != NULL
	  && regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  (*the_low_target.fetch_register) (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (the_low_target.fetch_register != NULL
	  && (*the_low_target.fetch_register) (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}
static void
linux_store_registers (struct regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const struct regs_info *regs_info = (*the_low_target.regs_info) ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_inferior);
  register PTRACE_XFER_TYPE *buffer;
  register CORE_ADDR addr;
  register int count;
  char filename[64];
  register int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read, we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));

  /* Read all the longwords.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
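/* Worked example (illustrative): for memaddr == 0x1003, len == 6 and
   sizeof (PTRACE_XFER_TYPE) == 4, addr rounds down to 0x1000 and
   count = ((0x1009 - 0x1000) + 3) / 4 = 3 words; after the peeks the
   memcpy skips memaddr & 3 == 3 leading bytes of the first word.  */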
/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
    alloca (count * sizeof (PTRACE_XFER_TYPE));

  int pid = lwpid_of (current_inferior);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      unsigned int val = * (unsigned int *) myaddr;
      if (len == 1)
	val = val & 0xff;
      else if (len == 2)
	val = val & 0xffff;
      else if (len == 3)
	val = val & 0xffffff;
      debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
		    val, (long)memaddr);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  if (!ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      int lwpid;

      lwpid = lwpid_of (current_inferior);
      kill_lwp (lwpid, SIGINT);
    }
  else
    kill_lwp (signal_pid, SIGINT);
}
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (current_inferior);

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}
/* These breakpoint and watchpoint related wrapper functions simply
   pass on the function call if the target has registered a
   corresponding function.  */

static int
linux_supports_z_point_type (char z_type)
{
  return (the_low_target.supports_z_point_type != NULL
	  && the_low_target.supports_z_point_type (z_type));
}

static int
linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, struct raw_breakpoint *bp)
{
  if (the_low_target.insert_point != NULL)
    return the_low_target.insert_point (type, addr, size, bp);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, struct raw_breakpoint *bp)
{
  if (the_low_target.remove_point != NULL)
    return the_low_target.remove_point (type, addr, size, bp);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_by_watchpoint;
}

static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_data_address;
}
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;
  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
}
#endif
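/* Worked example (illustrative): if the kernel loaded text at 0x8000
   with text_end 0xa000 and data at 0x40000, gdb is told text_p = 0x8000
   and data_p = 0x40000 - (0xa000 - 0x8000) = 0x3e000, so that the
   compile-time data offset (equal to the text length) relocates to the
   real runtime data address.  */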
static int
linux_qxfer_osdata (const char *annex,
		    unsigned char *readbuf, unsigned const char *writebuf,
		    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  */

static void
siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
{
  int done = 0;

  if (the_low_target.siginfo_fixup != NULL)
    done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}
static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  char inf_siginfo[sizeof (siginfo_t)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (current_inferior);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
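/* Worked example of the clamping above (illustrative, assuming
   sizeof (siginfo) == 128 as on common Linux hosts): a request with
   offset == 120 and len == 32 is clamped to len == 8, so the transfer
   never runs past the end of the converted siginfo buffer.  */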
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it notices when children change state; and it is the handler for
   the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* fprintf is not async-signal-safe, so call write
	     directly.  */
	  if (write (2, "sigchld_handler\n",
		     sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}
static int
linux_supports_non_stop (void)
{
  return 1;
}
static int
linux_async (int enable)
{
  int previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
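/* Illustrative summary (not in the original): the pipe above is an
   instance of the self-pipe trick.  The async-signal-safe
   sigchld_handler only marks linux_event_pipe[1] (via
   async_file_mark), while the event loop watches linux_event_pipe[0]
   and calls handle_target_event, keeping all real work out of signal
   context.  */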
static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}

static int
linux_supports_multi_process (void)
{
  return 1;
}

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}

static int
linux_supports_agent (void)
{
  return 1;
}

static int
linux_supports_range_stepping (void)
{
  if (*the_low_target.supports_range_stepping == NULL)
    return 0;

  return (*the_low_target.supports_range_stepping) ();
}
/* Enumerate spufs IDs for process PID.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      if (pos >= offset && pos + 4 <= offset + len)
	{
	  *(unsigned int *)(buf + pos - offset) = fd;
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
		 unsigned const char *writebuf,
		 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (current_inferior);
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif
static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_inferior);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#else
# define linux_read_loadmap NULL
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}

static int
linux_supports_tracepoints (void)
{
  if (*the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}
static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}

static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}

static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

static int
linux_prepare_to_access_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_pause_all (1);
  return 0;
}

static void
linux_done_accessing_memory (void)
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    linux_unpause_all (1);
}
static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, trampoline, trampoline_size,
     jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end,
     err);
}
static struct emit_ops *
linux_emit_ops (void)
{
  if (the_low_target.emit_ops != NULL)
    return (*the_low_target.emit_ops) ();
  else
    return NULL;
}

static int
linux_get_min_fast_tracepoint_insn_len (void)
{
  return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
}
/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}
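/* Illustrative auxv layout (not in the original): each entry is an
   (a_type, a_val) pair.  On a 64-bit inferior the 16 bytes
   03 00 00 00 00 00 00 00 | 40 00 40 00 00 00 00 00 decode as
   a_type == AT_PHDR (3) with a_val == 0x400040, and an AT_PHNUM (5)
   entry likewise carries the program header count.  */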
/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.
	 Fortunately any real world executable, including PIE
	 executables, always has PT_PHDR present.  PT_PHDR is not
	 present in some shared libraries or in fpc (Free Pascal 2.4)
	 binaries, but neither of those needs or provides DT_DEBUG
	 anyway (fpc binaries are statically linked).

	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.

	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
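/* Worked example (illustrative): a PIE loaded at base 0x555555554000
   whose PT_PHDR records p_vaddr == 0x40 shows up in auxv with
   AT_PHDR == 0x555555554040, so relocation = 0x555555554040 - 0x40
   == 0x555555554000; PT_DYNAMIC's p_vaddr plus that relocation yields
   the runtime &_DYNAMIC.  For a non-PIE executable relocation is 0.  */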
/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#ifdef DT_MIPS_RLD_MAP
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;

	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}
/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
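/* Worked example (illustrative): on a little-endian host with an
   8-byte CORE_ADDR, reading a 4-byte inferior pointer 0x00401000 fills
   the first four union bytes with 00 10 40 00, and addr.ui correctly
   yields 0x00401000; copying through addr.core_addr instead would only
   be valid when ptr_size == sizeof (CORE_ADDR).  */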
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_inferior);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      len = sep - annex;
      if (len == 5 && strncmp (annex, "start", 5) == 0)
	addrp = &lm_addr;
      else if (len == 4 && strncmp (annex, "prev", 4) == 0)
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  Such situation will not change
	 for this inferior - do not retry it.  Report it to GDB as
	 E01, see for the reasons at the GDB solib-svr4.c side.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has valid name as the first entry
	 corresponds to the main executable.  The first entry should not be
	 skipped if the dynamic loader was loaded late by a static executable
	 (see solib-svr4.c parameter ignore_first).  But in such case the main
	 executable does not have PT_DYNAMIC present and this function already
	 exited above due to failed get_r_debug.  */
      if (lm_prev == 0)
	{
	  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
	  p = p + strlen (p);
	}
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      /* 6x the size for xml_escape_text below.  */
	      size_t len = 6 * strlen ((char *) libname);
	      char *name;

	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  *p++ = '>';
		  header_done = 1;
		}

	      while (allocated < p - document + len + 200)
		{
		  /* Expand to guarantee sufficient storage.  */
		  uintptr_t document_len = p - document;

		  document = xrealloc (document, 2 * allocated);
		  allocated *= 2;
		  p = document + document_len;
		}

	      name = xml_escape_text ((char *) libname);
	      p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
			       "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			    name, (unsigned long) lm_addr,
			    (unsigned long) l_addr, (unsigned long) l_ld);
	      free (name);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  free (document);

  return len;
}
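/* Illustrative reply (not in the original): a short library list might
   be returned to GDB as

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
	      l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdb80"/>
     </library-list-svr4>

   with each library element generated by the sprintf above; the
   addresses shown are hypothetical.  */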
#ifdef HAVE_LINUX_BTRACE

/* See to_enable_btrace target method.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
		       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
#endif /* HAVE_LINUX_BTRACE */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};
static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}