1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "linux-low.h"
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
28 #include <sys/ioctl.h>
34 #include <sys/syscall.h>
38 #include <sys/types.h>
44 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
45 then ELFMAG0 will have been defined. If it didn't get included by
46 gdb_proc_service.h then including it will likely introduce a duplicate
47 definition of elf_fpregset_t. */
52 #define SPUFS_MAGIC 0x23c9b64e
55 #ifndef PTRACE_GETSIGINFO
56 # define PTRACE_GETSIGINFO 0x4202
57 # define PTRACE_SETSIGINFO 0x4203
64 /* If the system headers did not provide the constants, hard-code the normal values.  */
66 #ifndef PTRACE_EVENT_FORK
68 #define PTRACE_SETOPTIONS 0x4200
69 #define PTRACE_GETEVENTMSG 0x4201
71 /* options set using PTRACE_SETOPTIONS */
72 #define PTRACE_O_TRACESYSGOOD 0x00000001
73 #define PTRACE_O_TRACEFORK 0x00000002
74 #define PTRACE_O_TRACEVFORK 0x00000004
75 #define PTRACE_O_TRACECLONE 0x00000008
76 #define PTRACE_O_TRACEEXEC 0x00000010
77 #define PTRACE_O_TRACEVFORKDONE 0x00000020
78 #define PTRACE_O_TRACEEXIT 0x00000040
80 /* Wait extended result codes for the above trace options. */
81 #define PTRACE_EVENT_FORK 1
82 #define PTRACE_EVENT_VFORK 2
83 #define PTRACE_EVENT_CLONE 3
84 #define PTRACE_EVENT_EXEC 4
85 #define PTRACE_EVENT_VFORK_DONE 5
86 #define PTRACE_EVENT_EXIT 6
88 #endif /* PTRACE_EVENT_FORK */
90 /* We can't always assume that this flag is available, but all systems
91 with the ptrace event handlers also have __WALL, so it's safe to use it here.  */
94 #define __WALL 0x40000000 /* Wait for any child. */
98 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
102 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
107 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
108 representation of the thread ID.
110 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
111 the same as the LWP ID.
113 ``all_processes'' is keyed by the "overall process ID", which
114 GNU/Linux calls tgid, "thread group ID". */
/* List of all LWPs under our control, keyed as described in the comment
   above (on Linux the LWP ID and process ID currently coincide).  */
116 struct inferior_list all_lwps
;
118 /* A list of all unknown processes which receive stop signals. Some other
119 process will presumably claim each of these as forked children.  */
122 struct inferior_list stopped_pids
;
124 /* FIXME this is a bit of a hack, and could be removed. */
/* Nonzero while we are stopping all threads (set around the
   send_sigstop/wait_for_sigstop sequence).  */
125 int stopping_threads
;
127 /* FIXME make into a target method? */
128 int using_threads
= 1;
130 /* This flag is true iff we've just created or attached to our first
131 inferior but it has not stopped yet. As soon as it does, we need
132 to call the low target's arch_setup callback. Doing this only on
133 the first inferior avoids reinitializing the architecture on every
134 inferior, and avoids messing with the register caches of the
135 already running inferiors. NOTE: this assumes all inferiors under
136 control of gdbserver have the same architecture. */
137 static int new_inferior
;
/* Forward declarations for the static helpers defined later in this file.  */
139 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
140 int step
, int signal
, siginfo_t
*info
);
141 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
142 static void stop_all_lwps (void);
143 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
144 static void *add_lwp (ptid_t ptid
);
145 static int linux_stopped_by_watchpoint (void);
146 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
147 static int linux_core_of_thread (ptid_t ptid
);
148 static void proceed_all_lwps (void);
149 static void unstop_all_lwps (struct lwp_info
*except
);
150 static int finish_step_over (struct lwp_info
*lwp
);
151 static CORE_ADDR
get_stop_pc (struct lwp_info
*lwp
);
152 static int kill_lwp (unsigned long lwpid
, int signo
);
154 /* True if the low target can hardware single-step. Such targets
155 don't need a BREAKPOINT_REINSERT_ADDR callback. */
158 can_hardware_single_step (void)
160 return (the_low_target
.breakpoint_reinsert_addr
== NULL
);
163 /* True if the low target supports memory breakpoints. If so, we'll
164 have a GET_PC implementation. */
167 supports_breakpoints (void)
169 return (the_low_target
.get_pc
!= NULL
);
172 struct pending_signals
176 struct pending_signals
*prev
;
179 #define PTRACE_ARG3_TYPE void *
180 #define PTRACE_ARG4_TYPE void *
181 #define PTRACE_XFER_TYPE long
183 #ifdef HAVE_LINUX_REGSETS
184 static char *disabled_regsets
;
185 static int num_regsets
;
188 /* The read/write ends of the pipe registered as waitable file in the event loop.  */
190 static int linux_event_pipe
[2] = { -1, -1 };
192 /* True if we're currently in async mode. */
193 #define target_is_async_p() (linux_event_pipe[0] != -1)
195 static void send_sigstop (struct inferior_list_entry
*entry
);
196 static void wait_for_sigstop (struct inferior_list_entry
*entry
);
198 /* Accepts an integer PID; Returns a string representing a file that
199 can be opened to get info for the child process.
200 Space for the result is malloc'd, caller must free. */
203 linux_child_pid_to_exec_file (int pid
)
207 name1
= xmalloc (MAXPATHLEN
);
208 name2
= xmalloc (MAXPATHLEN
);
209 memset (name2
, 0, MAXPATHLEN
);
211 sprintf (name1
, "/proc/%d/exe", pid
);
212 if (readlink (name1
, name2
, MAXPATHLEN
) > 0)
224 /* Return non-zero if HEADER is a 64-bit ELF file. */
227 elf_64_header_p (const Elf64_Ehdr
*header
)
229 return (header
->e_ident
[EI_MAG0
] == ELFMAG0
230 && header
->e_ident
[EI_MAG1
] == ELFMAG1
231 && header
->e_ident
[EI_MAG2
] == ELFMAG2
232 && header
->e_ident
[EI_MAG3
] == ELFMAG3
233 && header
->e_ident
[EI_CLASS
] == ELFCLASS64
);
236 /* Return non-zero if FILE is a 64-bit ELF file,
237 zero if the file is not a 64-bit ELF file,
238 and -1 if the file is not accessible or doesn't exist. */
241 elf_64_file_p (const char *file
)
246 fd
= open (file
, O_RDONLY
);
250 if (read (fd
, &header
, sizeof (header
)) != sizeof (header
))
257 return elf_64_header_p (&header
);
261 delete_lwp (struct lwp_info
*lwp
)
263 remove_thread (get_lwp_thread (lwp
));
264 remove_inferior (&all_lwps
, &lwp
->head
);
265 free (lwp
->arch_private
);
269 /* Add a process to the common process list, and set its private
272 static struct process_info
*
273 linux_add_process (int pid
, int attached
)
275 struct process_info
*proc
;
277 /* Is this the first process? If so, then set the arch. */
278 if (all_processes
.head
== NULL
)
281 proc
= add_process (pid
, attached
);
282 proc
->private = xcalloc (1, sizeof (*proc
->private));
284 if (the_low_target
.new_process
!= NULL
)
285 proc
->private->arch_private
= the_low_target
.new_process ();
290 /* Wrapper function for waitpid which handles EINTR, and emulates
291 __WALL for systems where that is not available. */
294 my_waitpid (int pid
, int *status
, int flags
)
299 fprintf (stderr
, "my_waitpid (%d, 0x%x)\n", pid
, flags
);
303 sigset_t block_mask
, org_mask
, wake_mask
;
306 wnohang
= (flags
& WNOHANG
) != 0;
307 flags
&= ~(__WALL
| __WCLONE
);
310 /* Block all signals while here. This avoids knowing about
311 LinuxThread's signals. */
312 sigfillset (&block_mask
);
313 sigprocmask (SIG_BLOCK
, &block_mask
, &org_mask
);
315 /* ... except during the sigsuspend below. */
316 sigemptyset (&wake_mask
);
320 /* Since all signals are blocked, there's no need to check for EINTR here.  */
322 ret
= waitpid (pid
, status
, flags
);
325 if (ret
== -1 && out_errno
!= ECHILD
)
330 if (flags
& __WCLONE
)
332 /* We've tried both flavors now. If WNOHANG is set,
333 there's nothing else to do, just bail out. */
338 fprintf (stderr
, "blocking\n");
340 /* Block waiting for signals. */
341 sigsuspend (&wake_mask
);
347 sigprocmask (SIG_SETMASK
, &org_mask
, NULL
);
352 ret
= waitpid (pid
, status
, flags
);
353 while (ret
== -1 && errno
== EINTR
);
358 fprintf (stderr
, "my_waitpid (%d, 0x%x): status(%x), %d\n",
359 pid
, flags
, status
? *status
: -1, ret
);
365 /* Handle a GNU/Linux extended wait response. If we see a clone
366 event, we need to add the new LWP to our list (and not report the
367 trap to higher layers). */
370 handle_extended_wait (struct lwp_info
*event_child
, int wstat
)
372 int event
= wstat
>> 16;
373 struct lwp_info
*new_lwp
;
375 if (event
== PTRACE_EVENT_CLONE
)
378 unsigned long new_pid
;
379 int ret
, status
= W_STOPCODE (SIGSTOP
);
381 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_child
), 0, &new_pid
);
383 /* If we haven't already seen the new PID stop, wait for it now. */
384 if (! pull_pid_from_list (&stopped_pids
, new_pid
))
386 /* The new child has a pending SIGSTOP. We can't affect it until it
387 hits the SIGSTOP, but we're already attached. */
389 ret
= my_waitpid (new_pid
, &status
, __WALL
);
392 perror_with_name ("waiting for new child");
393 else if (ret
!= new_pid
)
394 warning ("wait returned unexpected PID %d", ret
);
395 else if (!WIFSTOPPED (status
))
396 warning ("wait returned unexpected status 0x%x", status
);
399 ptrace (PTRACE_SETOPTIONS
, new_pid
, 0, (PTRACE_ARG4_TYPE
) PTRACE_O_TRACECLONE
);
401 ptid
= ptid_build (pid_of (event_child
), new_pid
, 0);
402 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
403 add_thread (ptid
, new_lwp
);
405 /* Either we're going to immediately resume the new thread
406 or leave it stopped. linux_resume_one_lwp is a nop if it
407 thinks the thread is currently running, so set this first
408 before calling linux_resume_one_lwp. */
409 new_lwp
->stopped
= 1;
411 /* Normally we will get the pending SIGSTOP. But in some cases
412 we might get another signal delivered to the group first.
413 If we do get another signal, be sure not to lose it. */
414 if (WSTOPSIG (status
) == SIGSTOP
)
416 if (stopping_threads
)
417 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
419 linux_resume_one_lwp (new_lwp
, 0, 0, NULL
);
423 new_lwp
->stop_expected
= 1;
425 if (stopping_threads
)
427 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
428 new_lwp
->status_pending_p
= 1;
429 new_lwp
->status_pending
= status
;
432 /* Pass the signal on. This is what GDB does - except
433 shouldn't we really report it instead? */
434 linux_resume_one_lwp (new_lwp
, 0, WSTOPSIG (status
), NULL
);
437 /* Always resume the current thread. If we are stopping
438 threads, it will have a pending SIGSTOP; we may as well
440 linux_resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
444 /* Return the PC as read from the regcache of LWP, without any
448 get_pc (struct lwp_info
*lwp
)
450 struct thread_info
*saved_inferior
;
451 struct regcache
*regcache
;
454 if (the_low_target
.get_pc
== NULL
)
457 saved_inferior
= current_inferior
;
458 current_inferior
= get_lwp_thread (lwp
);
460 regcache
= get_thread_regcache (current_inferior
, 1);
461 pc
= (*the_low_target
.get_pc
) (regcache
);
464 fprintf (stderr
, "pc is 0x%lx\n", (long) pc
);
466 current_inferior
= saved_inferior
;
470 /* This function should only be called if LWP got a SIGTRAP.
471 The SIGTRAP could mean several things.
473 On i386, where decr_pc_after_break is non-zero:
474 If we were single-stepping this process using PTRACE_SINGLESTEP,
475 we will get only the one SIGTRAP (even if the instruction we
476 stepped over was a breakpoint). The value of $eip will be the
478 If we continue the process using PTRACE_CONT, we will get a
479 SIGTRAP when we hit a breakpoint. The value of $eip will be
480 the instruction after the breakpoint (i.e. needs to be
481 decremented). If we report the SIGTRAP to GDB, we must also
482 report the undecremented PC. If we cancel the SIGTRAP, we
483 must resume at the decremented PC.
485 (Presumably, not yet tested) On a non-decr_pc_after_break machine
486 with hardware or kernel single-step:
487 If we single-step over a breakpoint instruction, our PC will
488 point at the following instruction. If we continue and hit a
489 breakpoint instruction, our PC will point at the breakpoint
493 get_stop_pc (struct lwp_info
*lwp
)
497 if (the_low_target
.get_pc
== NULL
)
500 stop_pc
= get_pc (lwp
);
502 if (WSTOPSIG (lwp
->last_status
) == SIGTRAP
504 && !lwp
->stopped_by_watchpoint
505 && lwp
->last_status
>> 16 == 0)
506 stop_pc
-= the_low_target
.decr_pc_after_break
;
509 fprintf (stderr
, "stop pc is 0x%lx\n", (long) stop_pc
);
515 add_lwp (ptid_t ptid
)
517 struct lwp_info
*lwp
;
519 lwp
= (struct lwp_info
*) xmalloc (sizeof (*lwp
));
520 memset (lwp
, 0, sizeof (*lwp
));
524 if (the_low_target
.new_thread
!= NULL
)
525 lwp
->arch_private
= the_low_target
.new_thread ();
527 add_inferior_to_list (&all_lwps
, &lwp
->head
);
532 /* Start an inferior process and returns its pid.
533 ALLARGS is a vector of program-name and args. */
536 linux_create_inferior (char *program
, char **allargs
)
538 struct lwp_info
*new_lwp
;
542 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
548 perror_with_name ("fork");
552 ptrace (PTRACE_TRACEME
, 0, 0, 0);
554 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
555 signal (__SIGRTMIN
+ 1, SIG_DFL
);
560 execv (program
, allargs
);
562 execvp (program
, allargs
);
564 fprintf (stderr
, "Cannot exec %s: %s.\n", program
,
570 linux_add_process (pid
, 0);
572 ptid
= ptid_build (pid
, pid
, 0);
573 new_lwp
= add_lwp (ptid
);
574 add_thread (ptid
, new_lwp
);
575 new_lwp
->must_set_ptrace_flags
= 1;
580 /* Attach to an inferior process. */
583 linux_attach_lwp_1 (unsigned long lwpid
, int initial
)
586 struct lwp_info
*new_lwp
;
588 if (ptrace (PTRACE_ATTACH
, lwpid
, 0, 0) != 0)
592 /* If we fail to attach to an LWP, just warn. */
593 fprintf (stderr
, "Cannot attach to lwp %ld: %s (%d)\n", lwpid
,
594 strerror (errno
), errno
);
599 /* If we fail to attach to a process, report an error. */
600 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid
,
601 strerror (errno
), errno
);
605 /* NOTE/FIXME: This lwp might have not been the tgid. */
606 ptid
= ptid_build (lwpid
, lwpid
, 0);
609 /* Note that extracting the pid from the current inferior is
610 safe, since we're always called in the context of the same
611 process as this new thread. */
612 int pid
= pid_of (get_thread_lwp (current_inferior
));
613 ptid
= ptid_build (pid
, lwpid
, 0);
616 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
617 add_thread (ptid
, new_lwp
);
619 /* We need to wait for SIGSTOP before being able to make the next
620 ptrace call on this LWP. */
621 new_lwp
->must_set_ptrace_flags
= 1;
623 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH brings it to a halt.
626 There are several cases to consider here:
628 1) gdbserver has already attached to the process and is being notified
629 of a new thread that is being created.
630 In this case we should ignore that SIGSTOP and resume the
631 process. This is handled below by setting stop_expected = 1,
632 and the fact that add_thread sets last_resume_kind ==
635 2) This is the first thread (the process thread), and we're attaching
636 to it via attach_inferior.
637 In this case we want the process thread to stop.
638 This is handled by having linux_attach set last_resume_kind ==
639 resume_stop after we return.
640 ??? If the process already has several threads we leave the other
643 3) GDB is connecting to gdbserver and is requesting an enumeration of all
645 In this case we want the thread to stop.
646 FIXME: This case is currently not properly handled.
647 We should wait for the SIGSTOP but don't. Things work apparently
648 because enough time passes between when we ptrace (ATTACH) and when
649 gdb makes the next ptrace call on the thread.
651 On the other hand, if we are currently trying to stop all threads, we
652 should treat the new thread as if we had sent it a SIGSTOP. This works
653 because we are guaranteed that the add_lwp call above added us to the
654 end of the list, and so the new thread has not yet reached
655 wait_for_sigstop (but will). */
656 new_lwp
->stop_expected
= 1;
660 linux_attach_lwp (unsigned long lwpid
)
662 linux_attach_lwp_1 (lwpid
, 0);
666 linux_attach (unsigned long pid
)
668 linux_attach_lwp_1 (pid
, 1);
669 linux_add_process (pid
, 1);
673 struct thread_info
*thread
;
675 /* Don't ignore the initial SIGSTOP if we just attached to this
676 process. It will be collected by wait shortly. */
677 thread
= find_thread_ptid (ptid_build (pid
, pid
, 0));
678 thread
->last_resume_kind
= resume_stop
;
691 second_thread_of_pid_p (struct inferior_list_entry
*entry
, void *args
)
693 struct counter
*counter
= args
;
695 if (ptid_get_pid (entry
->id
) == counter
->pid
)
697 if (++counter
->count
> 1)
705 last_thread_of_process_p (struct thread_info
*thread
)
707 ptid_t ptid
= ((struct inferior_list_entry
*)thread
)->id
;
708 int pid
= ptid_get_pid (ptid
);
709 struct counter counter
= { pid
, 0 };
711 return (find_inferior (&all_threads
,
712 second_thread_of_pid_p
, &counter
) == NULL
);
715 /* Kill the inferior lwp. */
718 linux_kill_one_lwp (struct inferior_list_entry
*entry
, void *args
)
720 struct thread_info
*thread
= (struct thread_info
*) entry
;
721 struct lwp_info
*lwp
= get_thread_lwp (thread
);
723 int pid
= * (int *) args
;
725 if (ptid_get_pid (entry
->id
) != pid
)
728 /* We avoid killing the first thread here, because of a Linux kernel (at
729 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
730 the children get a chance to be reaped, it will remain a zombie
733 if (lwpid_of (lwp
) == pid
)
736 fprintf (stderr
, "lkop: is last of process %s\n",
737 target_pid_to_str (entry
->id
));
741 /* If we're killing a running inferior, make sure it is stopped
742 first, as PTRACE_KILL will not work otherwise. */
744 send_sigstop (&lwp
->head
);
748 ptrace (PTRACE_KILL
, lwpid_of (lwp
), 0, 0);
750 /* Make sure it died. The loop is most likely unnecessary. */
751 pid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
752 } while (pid
> 0 && WIFSTOPPED (wstat
));
760 struct process_info
*process
;
761 struct lwp_info
*lwp
;
762 struct thread_info
*thread
;
766 process
= find_process_pid (pid
);
770 find_inferior (&all_threads
, linux_kill_one_lwp
, &pid
);
772 /* See the comment in linux_kill_one_lwp. We did not kill the first
773 thread in the list, so do so now. */
774 lwp
= find_lwp_pid (pid_to_ptid (pid
));
775 thread
= get_lwp_thread (lwp
);
778 fprintf (stderr
, "lk_1: killing lwp %ld, for pid: %d\n",
779 lwpid_of (lwp
), pid
);
781 /* If we're killing a running inferior, make sure it is stopped
782 first, as PTRACE_KILL will not work otherwise. */
784 send_sigstop (&lwp
->head
);
788 ptrace (PTRACE_KILL
, lwpid_of (lwp
), 0, 0);
790 /* Make sure it died. The loop is most likely unnecessary. */
791 lwpid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
792 } while (lwpid
> 0 && WIFSTOPPED (wstat
));
796 the_target
->mourn (process
);
801 linux_detach_one_lwp (struct inferior_list_entry
*entry
, void *args
)
803 struct thread_info
*thread
= (struct thread_info
*) entry
;
804 struct lwp_info
*lwp
= get_thread_lwp (thread
);
805 int pid
= * (int *) args
;
807 if (ptid_get_pid (entry
->id
) != pid
)
810 /* If we're detaching from a running inferior, make sure it is
811 stopped first, as PTRACE_DETACH will not work otherwise. */
814 int lwpid
= lwpid_of (lwp
);
816 stopping_threads
= 1;
817 send_sigstop (&lwp
->head
);
819 /* If this detects a new thread through a clone event, the new
820 thread is appended to the end of the lwp list, so we'll
821 eventually detach from it. */
822 wait_for_sigstop (&lwp
->head
);
823 stopping_threads
= 0;
825 /* If LWP exits while we're trying to stop it, there's nothing
827 lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
832 /* If this process is stopped but is expecting a SIGSTOP, then make
833 sure we take care of that now. This isn't absolutely guaranteed
834 to collect the SIGSTOP, but is fairly likely to. */
835 if (lwp
->stop_expected
)
838 /* Clear stop_expected, so that the SIGSTOP will be reported. */
839 lwp
->stop_expected
= 0;
841 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
842 linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
845 /* Flush any pending changes to the process's registers. */
846 regcache_invalidate_one ((struct inferior_list_entry
*)
847 get_lwp_thread (lwp
));
849 /* Finally, let it resume. */
850 ptrace (PTRACE_DETACH
, lwpid_of (lwp
), 0, 0);
857 any_thread_of (struct inferior_list_entry
*entry
, void *args
)
861 if (ptid_get_pid (entry
->id
) == *pid_p
)
868 linux_detach (int pid
)
870 struct process_info
*process
;
872 process
= find_process_pid (pid
);
877 thread_db_detach (process
);
881 (struct thread_info
*) find_inferior (&all_threads
, any_thread_of
, &pid
);
883 delete_all_breakpoints ();
884 find_inferior (&all_threads
, linux_detach_one_lwp
, &pid
);
886 the_target
->mourn (process
);
891 linux_mourn (struct process_info
*process
)
893 struct process_info_private
*priv
;
896 thread_db_mourn (process
);
899 /* Freeing all private data. */
900 priv
= process
->private;
901 free (priv
->arch_private
);
903 process
->private = NULL
;
910 struct process_info
*process
;
912 process
= find_process_pid (pid
);
917 ret
= my_waitpid (pid
, &status
, 0);
918 if (WIFEXITED (status
) || WIFSIGNALED (status
))
920 } while (ret
!= -1 || errno
!= ECHILD
);
923 /* Return nonzero if the given thread is still alive. */
925 linux_thread_alive (ptid_t ptid
)
927 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
929 /* We assume we always know if a thread exits. If a whole process
930 exited but we still haven't been able to report it to GDB, we'll
931 hold on to the last lwp of the dead process. */
938 /* Return 1 if this lwp has an interesting status pending. */
940 status_pending_p_callback (struct inferior_list_entry
*entry
, void *arg
)
942 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
943 ptid_t ptid
= * (ptid_t
*) arg
;
944 struct thread_info
*thread
= get_lwp_thread (lwp
);
946 /* Check if we're only interested in events from a specific process
948 if (!ptid_equal (minus_one_ptid
, ptid
)
949 && ptid_get_pid (ptid
) != ptid_get_pid (lwp
->head
.id
))
952 thread
= get_lwp_thread (lwp
);
954 /* If we got a `vCont;t', but we haven't reported a stop yet, do
955 report any status pending the LWP may have. */
956 if (thread
->last_resume_kind
== resume_stop
957 && thread
->last_status
.kind
== TARGET_WAITKIND_STOPPED
)
960 return lwp
->status_pending_p
;
964 same_lwp (struct inferior_list_entry
*entry
, void *data
)
966 ptid_t ptid
= *(ptid_t
*) data
;
969 if (ptid_get_lwp (ptid
) != 0)
970 lwp
= ptid_get_lwp (ptid
);
972 lwp
= ptid_get_pid (ptid
);
974 if (ptid_get_lwp (entry
->id
) == lwp
)
981 find_lwp_pid (ptid_t ptid
)
983 return (struct lwp_info
*) find_inferior (&all_lwps
, same_lwp
, &ptid
);
986 static struct lwp_info
*
987 linux_wait_for_lwp (ptid_t ptid
, int *wstatp
, int options
)
990 int to_wait_for
= -1;
991 struct lwp_info
*child
= NULL
;
994 fprintf (stderr
, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid
));
996 if (ptid_equal (ptid
, minus_one_ptid
))
997 to_wait_for
= -1; /* any child */
999 to_wait_for
= ptid_get_lwp (ptid
); /* this lwp only */
1005 ret
= my_waitpid (to_wait_for
, wstatp
, options
);
1006 if (ret
== 0 || (ret
== -1 && errno
== ECHILD
&& (options
& WNOHANG
)))
1009 perror_with_name ("waitpid");
1012 && (!WIFSTOPPED (*wstatp
)
1013 || (WSTOPSIG (*wstatp
) != 32
1014 && WSTOPSIG (*wstatp
) != 33)))
1015 fprintf (stderr
, "Got an event from %d (%x)\n", ret
, *wstatp
);
1017 child
= find_lwp_pid (pid_to_ptid (ret
));
1019 /* If we didn't find a process, one of two things presumably happened:
1020 - A process we started and then detached from has exited. Ignore it.
1021 - A process we are controlling has forked and the new child's stop
1022 was reported to us by the kernel. Save its PID. */
1023 if (child
== NULL
&& WIFSTOPPED (*wstatp
))
1025 add_pid_to_list (&stopped_pids
, ret
);
1028 else if (child
== NULL
)
1033 child
->last_status
= *wstatp
;
1035 /* Architecture-specific setup after inferior is running.
1036 This needs to happen after we have attached to the inferior
1037 and it is stopped for the first time, but before we access
1038 any inferior registers. */
1041 the_low_target
.arch_setup ();
1042 #ifdef HAVE_LINUX_REGSETS
1043 memset (disabled_regsets
, 0, num_regsets
);
1048 /* Fetch the possibly triggered data watchpoint info and store it in
1051 On some archs, like x86, that use debug registers to set
1052 watchpoints, it's possible that the way to know which watched
1053 address trapped, is to check the register that is used to select
1054 which address to watch. Problem is, between setting the
1055 watchpoint and reading back which data address trapped, the user
1056 may change the set of watchpoints, and, as a consequence, GDB
1057 changes the debug registers in the inferior. To avoid reading
1058 back a stale stopped-data-address when that happens, we cache in
1059 LP the fact that a watchpoint trapped, and the corresponding data
1060 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1061 changes the debug registers meanwhile, we have the cached data we can rely on.  */
1064 if (WIFSTOPPED (*wstatp
) && WSTOPSIG (*wstatp
) == SIGTRAP
)
1066 if (the_low_target
.stopped_by_watchpoint
== NULL
)
1068 child
->stopped_by_watchpoint
= 0;
1072 struct thread_info
*saved_inferior
;
1074 saved_inferior
= current_inferior
;
1075 current_inferior
= get_lwp_thread (child
);
1077 child
->stopped_by_watchpoint
1078 = the_low_target
.stopped_by_watchpoint ();
1080 if (child
->stopped_by_watchpoint
)
1082 if (the_low_target
.stopped_data_address
!= NULL
)
1083 child
->stopped_data_address
1084 = the_low_target
.stopped_data_address ();
1086 child
->stopped_data_address
= 0;
1089 current_inferior
= saved_inferior
;
1093 /* Store the STOP_PC, with adjustment applied. This depends on the
1094 architecture being defined already (so that CHILD has a valid
1095 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1097 if (WIFSTOPPED (*wstatp
))
1098 child
->stop_pc
= get_stop_pc (child
);
1101 && WIFSTOPPED (*wstatp
)
1102 && the_low_target
.get_pc
!= NULL
)
1104 struct thread_info
*saved_inferior
= current_inferior
;
1105 struct regcache
*regcache
;
1108 current_inferior
= get_lwp_thread (child
);
1109 regcache
= get_thread_regcache (current_inferior
, 1);
1110 pc
= (*the_low_target
.get_pc
) (regcache
);
1111 fprintf (stderr
, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc
);
1112 current_inferior
= saved_inferior
;
1118 /* This function should only be called if the LWP got a SIGTRAP.
1120 Handle any tracepoint steps or hits. Return true if a tracepoint
1121 event was handled, 0 otherwise. */
1124 handle_tracepoints (struct lwp_info
*lwp
)
1126 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1127 int tpoint_related_event
= 0;
1129 /* And we need to be sure that any all-threads-stopping doesn't try
1130 to move threads out of the jump pads, as it could deadlock the
1131 inferior (LWP could be in the jump pad, maybe even holding the
1134 /* Do any necessary step collect actions. */
1135 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1137 /* See if we just hit a tracepoint and do its main collect
1139 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1141 if (tpoint_related_event
)
1144 fprintf (stderr
, "got a tracepoint event\n");
1151 /* Arrange for a breakpoint to be hit again later. We don't keep the
1152 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1153 will handle the current event, eventually we will resume this LWP,
1154 and this breakpoint will trap again. */
1157 cancel_breakpoint (struct lwp_info
*lwp
)
1159 struct thread_info
*saved_inferior
;
1161 /* There's nothing to do if we don't support breakpoints. */
1162 if (!supports_breakpoints ())
1165 /* breakpoint_at reads from current inferior. */
1166 saved_inferior
= current_inferior
;
1167 current_inferior
= get_lwp_thread (lwp
);
1169 if ((*the_low_target
.breakpoint_at
) (lwp
->stop_pc
))
1173 "CB: Push back breakpoint for %s\n",
1174 target_pid_to_str (ptid_of (lwp
)));
1176 /* Back up the PC if necessary. */
1177 if (the_low_target
.decr_pc_after_break
)
1179 struct regcache
*regcache
1180 = get_thread_regcache (current_inferior
, 1);
1181 (*the_low_target
.set_pc
) (regcache
, lwp
->stop_pc
);
1184 current_inferior
= saved_inferior
;
1191 "CB: No breakpoint found at %s for [%s]\n",
1192 paddress (lwp
->stop_pc
),
1193 target_pid_to_str (ptid_of (lwp
)));
1196 current_inferior
= saved_inferior
;
1200 /* When the event-loop is doing a step-over, this points at the thread
1202 ptid_t step_over_bkpt
;
1204 /* Wait for an event from child PID. If PID is -1, wait for any
1205 child. Store the stop status through the status pointer WSTAT.
1206 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1207 event was found and OPTIONS contains WNOHANG. Return the PID of
1208 the stopped child otherwise. */
1211 linux_wait_for_event_1 (ptid_t ptid
, int *wstat
, int options
)
1213 struct lwp_info
*event_child
, *requested_child
;
1216 requested_child
= NULL
;
1218 /* Check for a lwp with a pending status. */
1220 if (ptid_equal (ptid
, minus_one_ptid
)
1221 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid
)), ptid
))
1223 event_child
= (struct lwp_info
*)
1224 find_inferior (&all_lwps
, status_pending_p_callback
, &ptid
);
1225 if (debug_threads
&& event_child
)
1226 fprintf (stderr
, "Got a pending child %ld\n", lwpid_of (event_child
));
1230 requested_child
= find_lwp_pid (ptid
);
1232 if (requested_child
->status_pending_p
)
1233 event_child
= requested_child
;
1236 if (event_child
!= NULL
)
1239 fprintf (stderr
, "Got an event from pending child %ld (%04x)\n",
1240 lwpid_of (event_child
), event_child
->status_pending
);
1241 *wstat
= event_child
->status_pending
;
1242 event_child
->status_pending_p
= 0;
1243 event_child
->status_pending
= 0;
1244 current_inferior
= get_lwp_thread (event_child
);
1245 return lwpid_of (event_child
);
1248 /* We only enter this loop if no process has a pending wait status. Thus
1249 any action taken in response to a wait status inside this loop is
1250 responding as soon as we detect the status, not after any pending
1254 event_child
= linux_wait_for_lwp (ptid
, wstat
, options
);
1256 if ((options
& WNOHANG
) && event_child
== NULL
)
1259 fprintf (stderr
, "WNOHANG set, no event found\n");
1263 if (event_child
== NULL
)
1264 error ("event from unknown child");
1266 current_inferior
= get_lwp_thread (event_child
);
1268 /* Check for thread exit. */
1269 if (! WIFSTOPPED (*wstat
))
1272 fprintf (stderr
, "LWP %ld exiting\n", lwpid_of (event_child
));
1274 /* If the last thread is exiting, just return. */
1275 if (last_thread_of_process_p (current_inferior
))
1278 fprintf (stderr
, "LWP %ld is last lwp of process\n",
1279 lwpid_of (event_child
));
1280 return lwpid_of (event_child
);
1285 current_inferior
= (struct thread_info
*) all_threads
.head
;
1287 fprintf (stderr
, "Current inferior is now %ld\n",
1288 lwpid_of (get_thread_lwp (current_inferior
)));
1292 current_inferior
= NULL
;
1294 fprintf (stderr
, "Current inferior is now <NULL>\n");
1297 /* If we were waiting for this particular child to do something...
1298 well, it did something. */
1299 if (requested_child
!= NULL
)
1301 int lwpid
= lwpid_of (event_child
);
1303 /* Cancel the step-over operation --- the thread that
1304 started it is gone. */
1305 if (finish_step_over (event_child
))
1306 unstop_all_lwps (event_child
);
1307 delete_lwp (event_child
);
1311 delete_lwp (event_child
);
1313 /* Wait for a more interesting event. */
1317 if (event_child
->must_set_ptrace_flags
)
1319 ptrace (PTRACE_SETOPTIONS
, lwpid_of (event_child
),
1320 0, (PTRACE_ARG4_TYPE
) PTRACE_O_TRACECLONE
);
1321 event_child
->must_set_ptrace_flags
= 0;
1324 if (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) == SIGTRAP
1325 && *wstat
>> 16 != 0)
1327 handle_extended_wait (event_child
, *wstat
);
1331 /* If GDB is not interested in this signal, don't stop other
1332 threads, and don't report it to GDB. Just resume the
1333 inferior right away. We do this for threading-related
1334 signals as well as any that GDB specifically requested we
1335 ignore. But never ignore SIGSTOP if we sent it ourselves,
1336 and do not ignore signals when stepping - they may require
1337 special handling to skip the signal handler. */
1338 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1340 if (WIFSTOPPED (*wstat
)
1341 && !event_child
->stepping
1343 #if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
1344 (current_process ()->private->thread_db
!= NULL
1345 && (WSTOPSIG (*wstat
) == __SIGRTMIN
1346 || WSTOPSIG (*wstat
) == __SIGRTMIN
+ 1))
1349 (pass_signals
[target_signal_from_host (WSTOPSIG (*wstat
))]
1350 && !(WSTOPSIG (*wstat
) == SIGSTOP
1351 && event_child
->stop_expected
))))
1353 siginfo_t info
, *info_p
;
1356 fprintf (stderr
, "Ignored signal %d for LWP %ld.\n",
1357 WSTOPSIG (*wstat
), lwpid_of (event_child
));
1359 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (event_child
), 0, &info
) == 0)
1363 linux_resume_one_lwp (event_child
, event_child
->stepping
,
1364 WSTOPSIG (*wstat
), info_p
);
1368 if (WIFSTOPPED (*wstat
)
1369 && WSTOPSIG (*wstat
) == SIGSTOP
1370 && event_child
->stop_expected
)
1375 fprintf (stderr
, "Expected stop.\n");
1376 event_child
->stop_expected
= 0;
1378 should_stop
= (current_inferior
->last_resume_kind
== resume_stop
1379 || stopping_threads
);
1383 linux_resume_one_lwp (event_child
,
1384 event_child
->stepping
, 0, NULL
);
1389 return lwpid_of (event_child
);
1397 linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
)
1401 if (ptid_is_pid (ptid
))
1403 /* A request to wait for a specific tgid. This is not possible
1404 with waitpid, so instead, we wait for any child, and leave
1405 children we're not interested in right now with a pending
1406 status to report later. */
1407 wait_ptid
= minus_one_ptid
;
1416 event_pid
= linux_wait_for_event_1 (wait_ptid
, wstat
, options
);
1419 && ptid_is_pid (ptid
) && ptid_get_pid (ptid
) != event_pid
)
1421 struct lwp_info
*event_child
= find_lwp_pid (pid_to_ptid (event_pid
));
1423 if (! WIFSTOPPED (*wstat
))
1424 mark_lwp_dead (event_child
, *wstat
);
1427 event_child
->status_pending_p
= 1;
1428 event_child
->status_pending
= *wstat
;
1437 /* Count the LWP's that have had events. */
1440 count_events_callback (struct inferior_list_entry
*entry
, void *data
)
1442 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1443 struct thread_info
*thread
= get_lwp_thread (lp
);
1446 gdb_assert (count
!= NULL
);
1448 /* Count only resumed LWPs that have a SIGTRAP event pending that
1449 should be reported to GDB. */
1450 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1451 && thread
->last_resume_kind
!= resume_stop
1452 && lp
->status_pending_p
1453 && WIFSTOPPED (lp
->status_pending
)
1454 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1455 && !breakpoint_inserted_here (lp
->stop_pc
))
1461 /* Select the LWP (if any) that is currently being single-stepped. */
1464 select_singlestep_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
1466 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1467 struct thread_info
*thread
= get_lwp_thread (lp
);
1469 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1470 && thread
->last_resume_kind
== resume_step
1471 && lp
->status_pending_p
)
1477 /* Select the Nth LWP that has had a SIGTRAP event that should be
1481 select_event_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
1483 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1484 struct thread_info
*thread
= get_lwp_thread (lp
);
1485 int *selector
= data
;
1487 gdb_assert (selector
!= NULL
);
1489 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1490 if (thread
->last_resume_kind
!= resume_stop
1491 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1492 && lp
->status_pending_p
1493 && WIFSTOPPED (lp
->status_pending
)
1494 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1495 && !breakpoint_inserted_here (lp
->stop_pc
))
1496 if ((*selector
)-- == 0)
1503 cancel_breakpoints_callback (struct inferior_list_entry
*entry
, void *data
)
1505 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1506 struct thread_info
*thread
= get_lwp_thread (lp
);
1507 struct lwp_info
*event_lp
= data
;
1509 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1513 /* If a LWP other than the LWP that we're reporting an event for has
1514 hit a GDB breakpoint (as opposed to some random trap signal),
1515 then just arrange for it to hit it again later. We don't keep
1516 the SIGTRAP status and don't forward the SIGTRAP signal to the
1517 LWP. We will handle the current event, eventually we will resume
1518 all LWPs, and this one will get its breakpoint trap again.
1520 If we do not do this, then we run the risk that the user will
1521 delete or disable the breakpoint, but the LWP will have already
1524 if (thread
->last_resume_kind
!= resume_stop
1525 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1526 && lp
->status_pending_p
1527 && WIFSTOPPED (lp
->status_pending
)
1528 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1530 && !lp
->stopped_by_watchpoint
1531 && cancel_breakpoint (lp
))
1532 /* Throw away the SIGTRAP. */
1533 lp
->status_pending_p
= 0;
1538 /* Select one LWP out of those that have events pending. */
1541 select_event_lwp (struct lwp_info
**orig_lp
)
1544 int random_selector
;
1545 struct lwp_info
*event_lp
;
1547 /* Give preference to any LWP that is being single-stepped. */
1549 = (struct lwp_info
*) find_inferior (&all_lwps
,
1550 select_singlestep_lwp_callback
, NULL
);
1551 if (event_lp
!= NULL
)
1555 "SEL: Select single-step %s\n",
1556 target_pid_to_str (ptid_of (event_lp
)));
1560 /* No single-stepping LWP. Select one at random, out of those
1561 which have had SIGTRAP events. */
1563 /* First see how many SIGTRAP events we have. */
1564 find_inferior (&all_lwps
, count_events_callback
, &num_events
);
1566 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1567 random_selector
= (int)
1568 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
1570 if (debug_threads
&& num_events
> 1)
1572 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1573 num_events
, random_selector
);
1575 event_lp
= (struct lwp_info
*) find_inferior (&all_lwps
,
1576 select_event_lwp_callback
,
1580 if (event_lp
!= NULL
)
1582 /* Switch the event LWP. */
1583 *orig_lp
= event_lp
;
1587 /* Set this inferior LWP's state as "want-stopped". We won't resume
1588 this LWP until the client gives us another action for it. */
1591 gdb_wants_lwp_stopped (struct inferior_list_entry
*entry
)
1593 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
1594 struct thread_info
*thread
= get_lwp_thread (lwp
);
1596 /* Most threads are stopped implicitly (all-stop); tag that with
1597 signal 0. The thread being explicitly reported stopped to the
1598 client, gets it's status fixed up afterwards. */
1599 thread
->last_status
.kind
= TARGET_WAITKIND_STOPPED
;
1600 thread
->last_status
.value
.sig
= TARGET_SIGNAL_0
;
1602 thread
->last_resume_kind
= resume_stop
;
1605 /* Set all LWP's states as "want-stopped". */
1608 gdb_wants_all_stopped (void)
1610 for_each_inferior (&all_lwps
, gdb_wants_lwp_stopped
);
1613 /* Wait for process, returns status. */
1616 linux_wait_1 (ptid_t ptid
,
1617 struct target_waitstatus
*ourstatus
, int target_options
)
1620 struct lwp_info
*event_child
;
1623 int step_over_finished
;
1624 int bp_explains_trap
;
1625 int maybe_internal_trap
;
1629 /* Translate generic target options into linux options. */
1631 if (target_options
& TARGET_WNOHANG
)
1635 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
1637 /* If we were only supposed to resume one thread, only wait for
1638 that thread - if it's still alive. If it died, however - which
1639 can happen if we're coming from the thread death case below -
1640 then we need to make sure we restart the other threads. We could
1641 pick a thread at random or restart all; restarting all is less
1644 && !ptid_equal (cont_thread
, null_ptid
)
1645 && !ptid_equal (cont_thread
, minus_one_ptid
))
1647 struct thread_info
*thread
;
1649 thread
= (struct thread_info
*) find_inferior_id (&all_threads
,
1652 /* No stepping, no signal - unless one is pending already, of course. */
1655 struct thread_resume resume_info
;
1656 resume_info
.thread
= minus_one_ptid
;
1657 resume_info
.kind
= resume_continue
;
1658 resume_info
.sig
= 0;
1659 linux_resume (&resume_info
, 1);
1665 if (ptid_equal (step_over_bkpt
, null_ptid
))
1666 pid
= linux_wait_for_event (ptid
, &w
, options
);
1670 fprintf (stderr
, "step_over_bkpt set [%s], doing a blocking wait\n",
1671 target_pid_to_str (step_over_bkpt
));
1672 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
1675 if (pid
== 0) /* only if TARGET_WNOHANG */
1678 event_child
= get_thread_lwp (current_inferior
);
1680 /* If we are waiting for a particular child, and it exited,
1681 linux_wait_for_event will return its exit status. Similarly if
1682 the last child exited. If this is not the last child, however,
1683 do not report it as exited until there is a 'thread exited' response
1684 available in the remote protocol. Instead, just wait for another event.
1685 This should be safe, because if the thread crashed we will already
1686 have reported the termination signal to GDB; that should stop any
1687 in-progress stepping operations, etc.
1689 Report the exit status of the last thread to exit. This matches
1690 LinuxThreads' behavior. */
1692 if (last_thread_of_process_p (current_inferior
))
1694 if (WIFEXITED (w
) || WIFSIGNALED (w
))
1696 delete_lwp (event_child
);
1698 current_inferior
= NULL
;
1702 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
1703 ourstatus
->value
.integer
= WEXITSTATUS (w
);
1706 fprintf (stderr
, "\nChild exited with retcode = %x \n", WEXITSTATUS (w
));
1710 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
1711 ourstatus
->value
.sig
= target_signal_from_host (WTERMSIG (w
));
1714 fprintf (stderr
, "\nChild terminated with signal = %x \n", WTERMSIG (w
));
1718 return pid_to_ptid (pid
);
1723 if (!WIFSTOPPED (w
))
1727 /* If this event was not handled before, and is not a SIGTRAP, we
1728 report it. SIGILL and SIGSEGV are also treated as traps in case
1729 a breakpoint is inserted at the current PC. If this target does
1730 not support internal breakpoints at all, we also report the
1731 SIGTRAP without further processing; it's of no concern to us. */
1733 = (supports_breakpoints ()
1734 && (WSTOPSIG (w
) == SIGTRAP
1735 || ((WSTOPSIG (w
) == SIGILL
1736 || WSTOPSIG (w
) == SIGSEGV
)
1737 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
1739 if (maybe_internal_trap
)
1741 /* Handle anything that requires bookkeeping before deciding to
1742 report the event or continue waiting. */
1744 /* First check if we can explain the SIGTRAP with an internal
1745 breakpoint, or if we should possibly report the event to GDB.
1746 Do this before anything that may remove or insert a
1748 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
1750 /* We have a SIGTRAP, possibly a step-over dance has just
1751 finished. If so, tweak the state machine accordingly,
1752 reinsert breakpoints and delete any reinsert (software
1753 single-step) breakpoints. */
1754 step_over_finished
= finish_step_over (event_child
);
1756 /* Now invoke the callbacks of any internal breakpoints there. */
1757 check_breakpoints (event_child
->stop_pc
);
1759 /* Handle tracepoint data collecting. This may overflow the
1760 trace buffer, and cause a tracing stop, removing
1762 trace_event
= handle_tracepoints (event_child
);
1764 if (bp_explains_trap
)
1766 /* If we stepped or ran into an internal breakpoint, we've
1767 already handled it. So next time we resume (from this
1768 PC), we should step over it. */
1770 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
1772 if (breakpoint_here (event_child
->stop_pc
))
1773 event_child
->need_step_over
= 1;
1778 /* We have some other signal, possibly a step-over dance was in
1779 progress, and it should be cancelled too. */
1780 step_over_finished
= finish_step_over (event_child
);
1785 /* We have all the data we need. Either report the event to GDB, or
1786 resume threads and keep waiting for more. */
1788 /* Check If GDB would be interested in this event. If GDB wanted
1789 this thread to single step, we always want to report the SIGTRAP,
1790 and let GDB handle it. Watchpoints should always be reported.
1791 So should signals we can't explain. A SIGTRAP we can't explain
1792 could be a GDB breakpoint --- we may or not support Z0
1793 breakpoints. If we do, we're be able to handle GDB breakpoints
1794 on top of internal breakpoints, by handling the internal
1795 breakpoint and still reporting the event to GDB. If we don't,
1796 we're out of luck, GDB won't see the breakpoint hit. */
1797 report_to_gdb
= (!maybe_internal_trap
1798 || current_inferior
->last_resume_kind
== resume_step
1799 || event_child
->stopped_by_watchpoint
1800 || (!step_over_finished
&& !bp_explains_trap
&& !trace_event
)
1801 || gdb_breakpoint_here (event_child
->stop_pc
));
1803 /* We found no reason GDB would want us to stop. We either hit one
1804 of our own breakpoints, or finished an internal step GDB
1805 shouldn't know about. */
1810 if (bp_explains_trap
)
1811 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
1812 if (step_over_finished
)
1813 fprintf (stderr
, "Step-over finished.\n");
1815 fprintf (stderr
, "Tracepoint event.\n");
1818 /* We're not reporting this breakpoint to GDB, so apply the
1819 decr_pc_after_break adjustment to the inferior's regcache
1822 if (the_low_target
.set_pc
!= NULL
)
1824 struct regcache
*regcache
1825 = get_thread_regcache (get_lwp_thread (event_child
), 1);
1826 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
1829 /* We've finished stepping over a breakpoint. We've stopped all
1830 LWPs momentarily except the stepping one. This is where we
1831 resume them all again. We're going to keep waiting, so use
1832 proceed, which handles stepping over the next breakpoint. */
1834 fprintf (stderr
, "proceeding all threads.\n");
1835 proceed_all_lwps ();
1841 if (current_inferior
->last_resume_kind
== resume_step
)
1842 fprintf (stderr
, "GDB wanted to single-step, reporting event.\n");
1843 if (event_child
->stopped_by_watchpoint
)
1844 fprintf (stderr
, "Stopped by watchpoint.\n");
1845 if (gdb_breakpoint_here (event_child
->stop_pc
))
1846 fprintf (stderr
, "Stopped by GDB breakpoint.\n");
1848 fprintf (stderr
, "Hit a non-gdbserver trap event.\n");
1851 /* Alright, we're going to report a stop. */
1855 /* In all-stop, stop all threads. */
1858 /* If we're not waiting for a specific LWP, choose an event LWP
1859 from among those that have had events. Giving equal priority
1860 to all LWPs that have had events helps prevent
1862 if (ptid_equal (ptid
, minus_one_ptid
))
1864 event_child
->status_pending_p
= 1;
1865 event_child
->status_pending
= w
;
1867 select_event_lwp (&event_child
);
1869 event_child
->status_pending_p
= 0;
1870 w
= event_child
->status_pending
;
1873 /* Now that we've selected our final event LWP, cancel any
1874 breakpoints in other LWPs that have hit a GDB breakpoint.
1875 See the comment in cancel_breakpoints_callback to find out
1877 find_inferior (&all_lwps
, cancel_breakpoints_callback
, event_child
);
1881 /* If we just finished a step-over, then all threads had been
1882 momentarily paused. In all-stop, that's fine, we want
1883 threads stopped by now anyway. In non-stop, we need to
1884 re-resume threads that GDB wanted to be running. */
1885 if (step_over_finished
)
1886 unstop_all_lwps (event_child
);
1889 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
1891 /* Do this before the gdb_wants_all_stopped calls below, since they
1892 always set last_resume_kind to resume_stop. */
1893 if (current_inferior
->last_resume_kind
== resume_stop
1894 && WSTOPSIG (w
) == SIGSTOP
)
1896 /* A thread that has been requested to stop by GDB with vCont;t,
1897 and it stopped cleanly, so report as SIG0. The use of
1898 SIGSTOP is an implementation detail. */
1899 ourstatus
->value
.sig
= TARGET_SIGNAL_0
;
1901 else if (current_inferior
->last_resume_kind
== resume_stop
1902 && WSTOPSIG (w
) != SIGSTOP
)
1904 /* A thread that has been requested to stop by GDB with vCont;t,
1905 but, it stopped for other reasons. */
1906 ourstatus
->value
.sig
= target_signal_from_host (WSTOPSIG (w
));
1910 ourstatus
->value
.sig
= target_signal_from_host (WSTOPSIG (w
));
1913 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
1917 /* From GDB's perspective, all-stop mode always stops all
1918 threads implicitly. Tag all threads as "want-stopped". */
1919 gdb_wants_all_stopped ();
1923 /* We're reporting this LWP as stopped. Update it's
1924 "want-stopped" state to what the client wants, until it gets
1925 a new resume action. */
1926 gdb_wants_lwp_stopped (&event_child
->head
);
1930 fprintf (stderr
, "linux_wait ret = %s, %d, %d\n",
1931 target_pid_to_str (ptid_of (event_child
)),
1933 ourstatus
->value
.sig
);
1935 get_lwp_thread (event_child
)->last_status
= *ourstatus
;
1936 return ptid_of (event_child
);
1939 /* Get rid of any pending event in the pipe. */
1941 async_file_flush (void)
1947 ret
= read (linux_event_pipe
[0], &buf
, 1);
1948 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
1951 /* Put something in the pipe, so the event loop wakes up. */
1953 async_file_mark (void)
1957 async_file_flush ();
1960 ret
= write (linux_event_pipe
[1], "+", 1);
1961 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
1963 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1964 be awakened anyway. */
1968 linux_wait (ptid_t ptid
,
1969 struct target_waitstatus
*ourstatus
, int target_options
)
1974 fprintf (stderr
, "linux_wait: [%s]\n", target_pid_to_str (ptid
));
1976 /* Flush the async file first. */
1977 if (target_is_async_p ())
1978 async_file_flush ();
1980 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
1982 /* If at least one stop was reported, there may be more. A single
1983 SIGCHLD can signal more than one child stop. */
1984 if (target_is_async_p ()
1985 && (target_options
& TARGET_WNOHANG
) != 0
1986 && !ptid_equal (event_ptid
, null_ptid
))
/* Send a signal to an LWP.  Returns the result of tkill/kill: 0 on
   success, -1 with errno set on failure.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    /* Remember a tkill ENOSYS so we only probe the syscall once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
2021 send_sigstop (struct inferior_list_entry
*entry
)
2023 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2029 pid
= lwpid_of (lwp
);
2031 /* If we already have a pending stop signal for this process, don't
2033 if (lwp
->stop_expected
)
2036 fprintf (stderr
, "Have pending sigstop for lwp %d\n", pid
);
2042 fprintf (stderr
, "Sending sigstop to lwp %d\n", pid
);
2044 lwp
->stop_expected
= 1;
2045 kill_lwp (pid
, SIGSTOP
);
2049 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
2051 /* It's dead, really. */
2054 /* Store the exit status for later. */
2055 lwp
->status_pending_p
= 1;
2056 lwp
->status_pending
= wstat
;
2058 /* Prevent trying to stop it. */
2061 /* No further stops are expected from a dead lwp. */
2062 lwp
->stop_expected
= 0;
2066 wait_for_sigstop (struct inferior_list_entry
*entry
)
2068 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2069 struct thread_info
*saved_inferior
;
2078 fprintf (stderr
, "wait_for_sigstop: LWP %ld already stopped\n",
2083 saved_inferior
= current_inferior
;
2084 if (saved_inferior
!= NULL
)
2085 saved_tid
= ((struct inferior_list_entry
*) saved_inferior
)->id
;
2087 saved_tid
= null_ptid
; /* avoid bogus unused warning */
2089 ptid
= lwp
->head
.id
;
2092 fprintf (stderr
, "wait_for_sigstop: pulling one event\n");
2094 pid
= linux_wait_for_event (ptid
, &wstat
, __WALL
);
2096 /* If we stopped with a non-SIGSTOP signal, save it for later
2097 and record the pending SIGSTOP. If the process exited, just
2099 if (WIFSTOPPED (wstat
))
2102 fprintf (stderr
, "LWP %ld stopped with signal %d\n",
2103 lwpid_of (lwp
), WSTOPSIG (wstat
));
2105 if (WSTOPSIG (wstat
) != SIGSTOP
)
2108 fprintf (stderr
, "LWP %ld stopped with non-sigstop status %06x\n",
2109 lwpid_of (lwp
), wstat
);
2111 lwp
->status_pending_p
= 1;
2112 lwp
->status_pending
= wstat
;
2118 fprintf (stderr
, "Process %d exited while stopping LWPs\n", pid
);
2120 lwp
= find_lwp_pid (pid_to_ptid (pid
));
2123 /* Leave this status pending for the next time we're able to
2124 report it. In the mean time, we'll report this lwp as
2125 dead to GDB, so GDB doesn't try to read registers and
2126 memory from it. This can only happen if this was the
2127 last thread of the process; otherwise, PID is removed
2128 from the thread tables before linux_wait_for_event
2130 mark_lwp_dead (lwp
, wstat
);
2134 if (saved_inferior
== NULL
|| linux_thread_alive (saved_tid
))
2135 current_inferior
= saved_inferior
;
2139 fprintf (stderr
, "Previously current thread died.\n");
2143 /* We can't change the current inferior behind GDB's back,
2144 otherwise, a subsequent command may apply to the wrong
2146 current_inferior
= NULL
;
2150 /* Set a valid thread as current. */
2151 set_desired_inferior (0);
2157 stop_all_lwps (void)
2159 stopping_threads
= 1;
2160 for_each_inferior (&all_lwps
, send_sigstop
);
2161 for_each_inferior (&all_lwps
, wait_for_sigstop
);
2162 stopping_threads
= 0;
2165 /* Resume execution of the inferior process.
2166 If STEP is nonzero, single-step it.
2167 If SIGNAL is nonzero, give it that signal. */
2170 linux_resume_one_lwp (struct lwp_info
*lwp
,
2171 int step
, int signal
, siginfo_t
*info
)
2173 struct thread_info
*saved_inferior
;
2175 if (lwp
->stopped
== 0)
2178 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2179 user used the "jump" command, or "set $pc = foo"). */
2180 if (lwp
->stop_pc
!= get_pc (lwp
))
2182 /* Collecting 'while-stepping' actions doesn't make sense
2184 release_while_stepping_state_list (get_lwp_thread (lwp
));
2187 /* If we have pending signals or status, and a new signal, enqueue the
2188 signal. Also enqueue the signal if we are waiting to reinsert a
2189 breakpoint; it will be picked up again below. */
2191 && (lwp
->status_pending_p
|| lwp
->pending_signals
!= NULL
2192 || lwp
->bp_reinsert
!= 0))
2194 struct pending_signals
*p_sig
;
2195 p_sig
= xmalloc (sizeof (*p_sig
));
2196 p_sig
->prev
= lwp
->pending_signals
;
2197 p_sig
->signal
= signal
;
2199 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
2201 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
2202 lwp
->pending_signals
= p_sig
;
2205 if (lwp
->status_pending_p
)
2208 fprintf (stderr
, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2209 " has pending status\n",
2210 lwpid_of (lwp
), step
? "step" : "continue", signal
,
2211 lwp
->stop_expected
? "expected" : "not expected");
2215 saved_inferior
= current_inferior
;
2216 current_inferior
= get_lwp_thread (lwp
);
2219 fprintf (stderr
, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2220 lwpid_of (lwp
), step
? "step" : "continue", signal
,
2221 lwp
->stop_expected
? "expected" : "not expected");
2223 /* This bit needs some thinking about. If we get a signal that
2224 we must report while a single-step reinsert is still pending,
2225 we often end up resuming the thread. It might be better to
2226 (ew) allow a stack of pending events; then we could be sure that
2227 the reinsert happened right away and not lose any signals.
2229 Making this stack would also shrink the window in which breakpoints are
2230 uninserted (see comment in linux_wait_for_lwp) but not enough for
2231 complete correctness, so it won't solve that problem. It may be
2232 worthwhile just to solve this one, however. */
2233 if (lwp
->bp_reinsert
!= 0)
2236 fprintf (stderr
, " pending reinsert at 0x%s\n",
2237 paddress (lwp
->bp_reinsert
));
2239 if (lwp
->bp_reinsert
!= 0 && can_hardware_single_step ())
2242 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
2247 /* Postpone any pending signal. It was enqueued above. */
2251 /* If we have while-stepping actions in this thread set it stepping.
2252 If we have a signal to deliver, it may or may not be set to
2253 SIG_IGN, we don't know. Assume so, and allow collecting
2254 while-stepping into a signal handler. A possible smart thing to
2255 do would be to set an internal breakpoint at the signal return
2256 address, continue, and carry on catching this while-stepping
2257 action only when that breakpoint is hit. A future
2259 if (get_lwp_thread (lwp
)->while_stepping
!= NULL
2260 && can_hardware_single_step ())
2264 "lwp %ld has a while-stepping action -> forcing step.\n",
2269 if (debug_threads
&& the_low_target
.get_pc
!= NULL
)
2271 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 1);
2272 CORE_ADDR pc
= (*the_low_target
.get_pc
) (regcache
);
2273 fprintf (stderr
, " resuming from pc 0x%lx\n", (long) pc
);
2276 /* If we have pending signals, consume one unless we are trying to reinsert
2278 if (lwp
->pending_signals
!= NULL
&& lwp
->bp_reinsert
== 0)
2280 struct pending_signals
**p_sig
;
2282 p_sig
= &lwp
->pending_signals
;
2283 while ((*p_sig
)->prev
!= NULL
)
2284 p_sig
= &(*p_sig
)->prev
;
2286 signal
= (*p_sig
)->signal
;
2287 if ((*p_sig
)->info
.si_signo
!= 0)
2288 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
), 0, &(*p_sig
)->info
);
2294 if (the_low_target
.prepare_to_resume
!= NULL
)
2295 the_low_target
.prepare_to_resume (lwp
);
2297 regcache_invalidate_one ((struct inferior_list_entry
*)
2298 get_lwp_thread (lwp
));
2301 lwp
->stopped_by_watchpoint
= 0;
2302 lwp
->stepping
= step
;
2303 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (lwp
), 0,
2304 /* Coerce to a uintptr_t first to avoid potential gcc warning
2305 of coercing an 8 byte integer to a 4 byte pointer. */
2306 (PTRACE_ARG4_TYPE
) (uintptr_t) signal
);
2308 current_inferior
= saved_inferior
;
2311 /* ESRCH from ptrace either means that the thread was already
2312 running (an error) or that it is gone (a race condition). If
2313 it's gone, we will get a notification the next time we wait,
2314 so we can ignore the error. We could differentiate these
2315 two, but it's tricky without waiting; the thread still exists
2316 as a zombie, so sending it signal 0 would succeed. So just
2321 perror_with_name ("ptrace");
2325 struct thread_resume_array
2327 struct thread_resume
*resume
;
2331 /* This function is called once per thread. We look up the thread
2332 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2335 This algorithm is O(threads * resume elements), but resume elements
2336 is small (and will remain small at least until GDB supports thread
2339 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
2341 struct lwp_info
*lwp
;
2342 struct thread_info
*thread
;
2344 struct thread_resume_array
*r
;
2346 thread
= (struct thread_info
*) entry
;
2347 lwp
= get_thread_lwp (thread
);
2350 for (ndx
= 0; ndx
< r
->n
; ndx
++)
2352 ptid_t ptid
= r
->resume
[ndx
].thread
;
2353 if (ptid_equal (ptid
, minus_one_ptid
)
2354 || ptid_equal (ptid
, entry
->id
)
2355 || (ptid_is_pid (ptid
)
2356 && (ptid_get_pid (ptid
) == pid_of (lwp
)))
2357 || (ptid_get_lwp (ptid
) == -1
2358 && (ptid_get_pid (ptid
) == pid_of (lwp
))))
2360 if (r
->resume
[ndx
].kind
== resume_stop
2361 && thread
->last_resume_kind
== resume_stop
)
2364 fprintf (stderr
, "already %s LWP %ld at GDB's request\n",
2365 thread
->last_status
.kind
== TARGET_WAITKIND_STOPPED
2373 lwp
->resume
= &r
->resume
[ndx
];
2374 thread
->last_resume_kind
= lwp
->resume
->kind
;
2379 /* No resume action for this thread. */
2386 /* Set *FLAG_P if this lwp has an interesting status pending. */
2388 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
2390 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2392 /* LWPs which will not be resumed are not interesting, because
2393 we might not wait for them next time through linux_wait. */
2394 if (lwp
->resume
== NULL
)
2397 if (lwp
->status_pending_p
)
2398 * (int *) flag_p
= 1;
2403 /* Return 1 if this lwp that GDB wants running is stopped at an
2404 internal breakpoint that we need to step over. It assumes that any
2405 required STOP_PC adjustment has already been propagated to the
2406 inferior's regcache. */
2409 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
2411 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2412 struct thread_info
*thread
;
2413 struct thread_info
*saved_inferior
;
2416 /* LWPs which will not be resumed are not interesting, because we
2417 might not wait for them next time through linux_wait. */
2423 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2428 thread
= get_lwp_thread (lwp
);
2430 if (thread
->last_resume_kind
== resume_stop
)
2434 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2439 if (!lwp
->need_step_over
)
2443 "Need step over [LWP %ld]? No\n", lwpid_of (lwp
));
2446 if (lwp
->status_pending_p
)
2450 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2455 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2459 /* If the PC has changed since we stopped, then don't do anything,
2460 and let the breakpoint/tracepoint be hit. This happens if, for
2461 instance, GDB handled the decr_pc_after_break subtraction itself,
2462 GDB is OOL stepping this thread, or the user has issued a "jump"
2463 command, or poked thread's registers herself. */
2464 if (pc
!= lwp
->stop_pc
)
2468 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2469 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2470 lwpid_of (lwp
), paddress (lwp
->stop_pc
), paddress (pc
));
2472 lwp
->need_step_over
= 0;
2476 saved_inferior
= current_inferior
;
2477 current_inferior
= thread
;
2479 /* We can only step over breakpoints we know about. */
2480 if (breakpoint_here (pc
))
2482 /* Don't step over a breakpoint that GDB expects to hit
2484 if (gdb_breakpoint_here (pc
))
2488 "Need step over [LWP %ld]? yes, but found"
2489 " GDB breakpoint at 0x%s; skipping step over\n",
2490 lwpid_of (lwp
), paddress (pc
));
2492 current_inferior
= saved_inferior
;
2499 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2500 lwpid_of (lwp
), paddress (pc
));
2502 /* We've found an lwp that needs stepping over --- return 1 so
2503 that find_inferior stops looking. */
2504 current_inferior
= saved_inferior
;
2506 /* If the step over is cancelled, this is set again. */
2507 lwp
->need_step_over
= 0;
2512 current_inferior
= saved_inferior
;
2516 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2517 lwpid_of (lwp
), paddress (pc
));
2522 /* Start a step-over operation on LWP. When LWP stopped at a
2523 breakpoint, to make progress, we need to remove the breakpoint out
2524 of the way. If we let other threads run while we do that, they may
2525 pass by the breakpoint location and miss hitting it. To avoid
2526 that, a step-over momentarily stops all threads while LWP is
2527 single-stepped while the breakpoint is temporarily uninserted from
2528 the inferior. When the single-step finishes, we reinsert the
2529 breakpoint, and let all threads that are supposed to be running,
2532 On targets that don't support hardware single-step, we don't
2533 currently support full software single-stepping. Instead, we only
2534 support stepping over the thread event breakpoint, by asking the
2535 low target where to place a reinsert breakpoint. Since this
2536 routine assumes the breakpoint being stepped over is a thread event
2537 breakpoint, it usually assumes the return address of the current
2538 function is a good enough place to set the reinsert breakpoint. */
2541 start_step_over (struct lwp_info
*lwp
)
2543 struct thread_info
*saved_inferior
;
2549 "Starting step-over on LWP %ld. Stopping all threads\n",
2555 fprintf (stderr
, "Done stopping all threads for step-over.\n");
2557 /* Note, we should always reach here with an already adjusted PC,
2558 either by GDB (if we're resuming due to GDB's request), or by our
2559 caller, if we just finished handling an internal breakpoint GDB
2560 shouldn't care about. */
2563 saved_inferior
= current_inferior
;
2564 current_inferior
= get_lwp_thread (lwp
);
2566 lwp
->bp_reinsert
= pc
;
2567 uninsert_breakpoints_at (pc
);
2569 if (can_hardware_single_step ())
2575 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
2576 set_reinsert_breakpoint (raddr
);
2580 current_inferior
= saved_inferior
;
2582 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
2584 /* Require next event from this LWP. */
2585 step_over_bkpt
= lwp
->head
.id
;
2589 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2590 start_step_over, if still there, and delete any reinsert
2591 breakpoints we've set, on non hardware single-step targets. */
2594 finish_step_over (struct lwp_info
*lwp
)
2596 if (lwp
->bp_reinsert
!= 0)
2599 fprintf (stderr
, "Finished step over.\n");
2601 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2602 may be no breakpoint to reinsert there by now. */
2603 reinsert_breakpoints_at (lwp
->bp_reinsert
);
2605 lwp
->bp_reinsert
= 0;
2607 /* Delete any software-single-step reinsert breakpoints. No
2608 longer needed. We don't have to worry about other threads
2609 hitting this trap, and later not being able to explain it,
2610 because we were stepping over a breakpoint, and we hold all
2611 threads but LWP stopped while doing that. */
2612 if (!can_hardware_single_step ())
2613 delete_reinsert_breakpoints ();
2615 step_over_bkpt
= null_ptid
;
2622 /* This function is called once per thread. We check the thread's resume
2623 request, which will tell us whether to resume, step, or leave the thread
2624 stopped; and what signal, if any, it should be sent.
2626 For threads which we aren't explicitly told otherwise, we preserve
2627 the stepping flag; this is used for stepping over gdbserver-placed
2630 If pending_flags was set in any thread, we queue any needed
2631 signals, since we won't actually resume. We already have a pending
2632 event to report, so we don't need to preserve any step requests;
2633 they should be re-issued if necessary. */
2636 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
2638 struct lwp_info
*lwp
;
2639 struct thread_info
*thread
;
2641 int leave_all_stopped
= * (int *) arg
;
2644 thread
= (struct thread_info
*) entry
;
2645 lwp
= get_thread_lwp (thread
);
2647 if (lwp
->resume
== NULL
)
2650 if (lwp
->resume
->kind
== resume_stop
)
2653 fprintf (stderr
, "resume_stop request for LWP %ld\n", lwpid_of (lwp
));
2658 fprintf (stderr
, "stopping LWP %ld\n", lwpid_of (lwp
));
2660 /* Stop the thread, and wait for the event asynchronously,
2661 through the event loop. */
2662 send_sigstop (&lwp
->head
);
2667 fprintf (stderr
, "already stopped LWP %ld\n",
2670 /* The LWP may have been stopped in an internal event that
2671 was not meant to be notified back to GDB (e.g., gdbserver
2672 breakpoint), so we should be reporting a stop event in
2675 /* If the thread already has a pending SIGSTOP, this is a
2676 no-op. Otherwise, something later will presumably resume
2677 the thread and this will cause it to cancel any pending
2678 operation, due to last_resume_kind == resume_stop. If
2679 the thread already has a pending status to report, we
2680 will still report it the next time we wait - see
2681 status_pending_p_callback. */
2682 send_sigstop (&lwp
->head
);
2685 /* For stop requests, we're done. */
2687 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
2691 /* If this thread which is about to be resumed has a pending status,
2692 then don't resume any threads - we can just report the pending
2693 status. Make sure to queue any signals that would otherwise be
2694 sent. In all-stop mode, we do this decision based on if *any*
2695 thread has a pending status. If there's a thread that needs the
2696 step-over-breakpoint dance, then don't resume any other thread
2697 but that particular one. */
2698 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
2703 fprintf (stderr
, "resuming LWP %ld\n", lwpid_of (lwp
));
2705 step
= (lwp
->resume
->kind
== resume_step
);
2706 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
2711 fprintf (stderr
, "leaving LWP %ld stopped\n", lwpid_of (lwp
));
2713 /* If we have a new signal, enqueue the signal. */
2714 if (lwp
->resume
->sig
!= 0)
2716 struct pending_signals
*p_sig
;
2717 p_sig
= xmalloc (sizeof (*p_sig
));
2718 p_sig
->prev
= lwp
->pending_signals
;
2719 p_sig
->signal
= lwp
->resume
->sig
;
2720 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
2722 /* If this is the same signal we were previously stopped by,
2723 make sure to queue its siginfo. We can ignore the return
2724 value of ptrace; if it fails, we'll skip
2725 PTRACE_SETSIGINFO. */
2726 if (WIFSTOPPED (lwp
->last_status
)
2727 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
2728 ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
), 0, &p_sig
->info
);
2730 lwp
->pending_signals
= p_sig
;
2734 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
2740 linux_resume (struct thread_resume
*resume_info
, size_t n
)
2742 struct thread_resume_array array
= { resume_info
, n
};
2743 struct lwp_info
*need_step_over
= NULL
;
2745 int leave_all_stopped
;
2747 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
2749 /* If there is a thread which would otherwise be resumed, which has
2750 a pending status, then don't resume any threads - we can just
2751 report the pending status. Make sure to queue any signals that
2752 would otherwise be sent. In non-stop mode, we'll apply this
2753 logic to each thread individually. We consume all pending events
2754 before considering to start a step-over (in all-stop). */
2757 find_inferior (&all_lwps
, resume_status_pending_p
, &any_pending
);
2759 /* If there is a thread which would otherwise be resumed, which is
2760 stopped at a breakpoint that needs stepping over, then don't
2761 resume any threads - have it step over the breakpoint with all
2762 other threads stopped, then resume all threads again. Make sure
2763 to queue any signals that would otherwise be delivered or
2765 if (!any_pending
&& supports_breakpoints ())
2767 = (struct lwp_info
*) find_inferior (&all_lwps
,
2768 need_step_over_p
, NULL
);
2770 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
2774 if (need_step_over
!= NULL
)
2775 fprintf (stderr
, "Not resuming all, need step over\n");
2776 else if (any_pending
)
2778 "Not resuming, all-stop and found "
2779 "an LWP with pending status\n");
2781 fprintf (stderr
, "Resuming, no pending status or step over needed\n");
2784 /* Even if we're leaving threads stopped, queue all signals we'd
2785 otherwise deliver. */
2786 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
2789 start_step_over (need_step_over
);
2792 /* This function is called once per thread. We check the thread's
2793 last resume request, which will tell us whether to resume, step, or
2794 leave the thread stopped. Any signal the client requested to be
2795 delivered has already been enqueued at this point.
2797 If any thread that GDB wants running is stopped at an internal
2798 breakpoint that needs stepping over, we start a step-over operation
2799 on that particular thread, and leave all others stopped. */
2802 proceed_one_lwp (struct inferior_list_entry
*entry
)
2804 struct lwp_info
*lwp
;
2805 struct thread_info
*thread
;
2808 lwp
= (struct lwp_info
*) entry
;
2812 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp
));
2817 fprintf (stderr
, " LWP %ld already running\n", lwpid_of (lwp
));
2821 thread
= get_lwp_thread (lwp
);
2823 if (thread
->last_resume_kind
== resume_stop
)
2826 fprintf (stderr
, " client wants LWP %ld stopped\n", lwpid_of (lwp
));
2830 if (lwp
->status_pending_p
)
2833 fprintf (stderr
, " LWP %ld has pending status, leaving stopped\n",
2841 fprintf (stderr
, " LWP %ld is suspended\n", lwpid_of (lwp
));
2845 step
= thread
->last_resume_kind
== resume_step
;
2846 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
2849 /* When we finish a step-over, set threads running again. If there's
2850 another thread that may need a step-over, now's the time to start
2851 it. Eventually, we'll move all threads past their breakpoints. */
2854 proceed_all_lwps (void)
2856 struct lwp_info
*need_step_over
;
2858 /* If there is a thread which would otherwise be resumed, which is
2859 stopped at a breakpoint that needs stepping over, then don't
2860 resume any threads - have it step over the breakpoint with all
2861 other threads stopped, then resume all threads again. */
2863 if (supports_breakpoints ())
2866 = (struct lwp_info
*) find_inferior (&all_lwps
,
2867 need_step_over_p
, NULL
);
2869 if (need_step_over
!= NULL
)
2872 fprintf (stderr
, "proceed_all_lwps: found "
2873 "thread %ld needing a step-over\n",
2874 lwpid_of (need_step_over
));
2876 start_step_over (need_step_over
);
2882 fprintf (stderr
, "Proceeding, no step-over needed\n");
2884 for_each_inferior (&all_lwps
, proceed_one_lwp
);
2887 /* Stopped LWPs that the client wanted to be running, that don't have
2888 pending statuses, are set to run again, except for EXCEPT, if not
2889 NULL. This undoes a stop_all_lwps call. */
2892 unstop_all_lwps (struct lwp_info
*except
)
2898 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except
));
2901 "unstopping all lwps\n");
2904 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2906 ++except
->suspended
;
2908 for_each_inferior (&all_lwps
, proceed_one_lwp
);
2911 --except
->suspended
;
2914 #ifdef HAVE_LINUX_USRREGS
2917 register_addr (int regnum
)
2921 if (regnum
< 0 || regnum
>= the_low_target
.num_regs
)
2922 error ("Invalid register number %d.", regnum
);
2924 addr
= the_low_target
.regmap
[regnum
];
2929 /* Fetch one register. */
2931 fetch_register (struct regcache
*regcache
, int regno
)
2938 if (regno
>= the_low_target
.num_regs
)
2940 if ((*the_low_target
.cannot_fetch_register
) (regno
))
2943 regaddr
= register_addr (regno
);
2947 pid
= lwpid_of (get_thread_lwp (current_inferior
));
2948 size
= ((register_size (regno
) + sizeof (PTRACE_XFER_TYPE
) - 1)
2949 & - sizeof (PTRACE_XFER_TYPE
));
2950 buf
= alloca (size
);
2951 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
2954 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
2955 ptrace (PTRACE_PEEKUSER
, pid
,
2956 /* Coerce to a uintptr_t first to avoid potential gcc warning
2957 of coercing an 8 byte integer to a 4 byte pointer. */
2958 (PTRACE_ARG3_TYPE
) (uintptr_t) regaddr
, 0);
2959 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
2961 error ("reading register %d: %s", regno
, strerror (errno
));
2964 if (the_low_target
.supply_ptrace_register
)
2965 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
2967 supply_register (regcache
, regno
, buf
);
2970 /* Fetch all registers, or just one, from the child process. */
2972 usr_fetch_inferior_registers (struct regcache
*regcache
, int regno
)
2975 for (regno
= 0; regno
< the_low_target
.num_regs
; regno
++)
2976 fetch_register (regcache
, regno
);
2978 fetch_register (regcache
, regno
);
2981 /* Store our register values back into the inferior.
2982 If REGNO is -1, do this for all registers.
2983 Otherwise, REGNO specifies which register (so we can save time). */
2985 usr_store_inferior_registers (struct regcache
*regcache
, int regno
)
2994 if (regno
>= the_low_target
.num_regs
)
2997 if ((*the_low_target
.cannot_store_register
) (regno
) == 1)
3000 regaddr
= register_addr (regno
);
3004 size
= (register_size (regno
) + sizeof (PTRACE_XFER_TYPE
) - 1)
3005 & - sizeof (PTRACE_XFER_TYPE
);
3006 buf
= alloca (size
);
3007 memset (buf
, 0, size
);
3009 if (the_low_target
.collect_ptrace_register
)
3010 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
3012 collect_register (regcache
, regno
, buf
);
3014 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3015 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
3018 ptrace (PTRACE_POKEUSER
, pid
,
3019 /* Coerce to a uintptr_t first to avoid potential gcc warning
3020 about coercing an 8 byte integer to a 4 byte pointer. */
3021 (PTRACE_ARG3_TYPE
) (uintptr_t) regaddr
,
3022 (PTRACE_ARG4_TYPE
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
3025 /* At this point, ESRCH should mean the process is
3026 already gone, in which case we simply ignore attempts
3027 to change its registers. See also the related
3028 comment in linux_resume_one_lwp. */
3032 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
3033 error ("writing register %d: %s", regno
, strerror (errno
));
3035 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
3039 for (regno
= 0; regno
< the_low_target
.num_regs
; regno
++)
3040 usr_store_inferior_registers (regcache
, regno
);
3042 #endif /* HAVE_LINUX_USRREGS */
3046 #ifdef HAVE_LINUX_REGSETS
3049 regsets_fetch_inferior_registers (struct regcache
*regcache
)
3051 struct regset_info
*regset
;
3052 int saw_general_regs
= 0;
3056 regset
= target_regsets
;
3058 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3059 while (regset
->size
>= 0)
3064 if (regset
->size
== 0 || disabled_regsets
[regset
- target_regsets
])
3070 buf
= xmalloc (regset
->size
);
3072 nt_type
= regset
->nt_type
;
3076 iov
.iov_len
= regset
->size
;
3077 data
= (void *) &iov
;
3083 res
= ptrace (regset
->get_request
, pid
, nt_type
, data
);
3085 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
3091 /* If we get EIO on a regset, do not try it again for
3093 disabled_regsets
[regset
- target_regsets
] = 1;
3100 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3105 else if (regset
->type
== GENERAL_REGS
)
3106 saw_general_regs
= 1;
3107 regset
->store_function (regcache
, buf
);
3111 if (saw_general_regs
)
3118 regsets_store_inferior_registers (struct regcache
*regcache
)
3120 struct regset_info
*regset
;
3121 int saw_general_regs
= 0;
3125 regset
= target_regsets
;
3127 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3128 while (regset
->size
>= 0)
3133 if (regset
->size
== 0 || disabled_regsets
[regset
- target_regsets
])
3139 buf
= xmalloc (regset
->size
);
3141 /* First fill the buffer with the current register set contents,
3142 in case there are any items in the kernel's regset that are
3143 not in gdbserver's regcache. */
3145 nt_type
= regset
->nt_type
;
3149 iov
.iov_len
= regset
->size
;
3150 data
= (void *) &iov
;
3156 res
= ptrace (regset
->get_request
, pid
, nt_type
, data
);
3158 res
= ptrace (regset
->get_request
, pid
, &iov
, data
);
3163 /* Then overlay our cached registers on that. */
3164 regset
->fill_function (regcache
, buf
);
3166 /* Only now do we write the register set. */
3168 res
= ptrace (regset
->set_request
, pid
, nt_type
, data
);
3170 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
3178 /* If we get EIO on a regset, do not try it again for
3180 disabled_regsets
[regset
- target_regsets
] = 1;
3184 else if (errno
== ESRCH
)
3186 /* At this point, ESRCH should mean the process is
3187 already gone, in which case we simply ignore attempts
3188 to change its registers. See also the related
3189 comment in linux_resume_one_lwp. */
3195 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3198 else if (regset
->type
== GENERAL_REGS
)
3199 saw_general_regs
= 1;
3203 if (saw_general_regs
)
3210 #endif /* HAVE_LINUX_REGSETS */
/* Fetch registers into REGCACHE, preferring the regsets interface and
   falling back to PTRACE_PEEKUSER when regsets are unavailable.  */

static void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
/* Store registers from REGCACHE, preferring the regsets interface and
   falling back to PTRACE_POKEUSER when regsets are unavailable.  */

static void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3238 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3239 to debugger memory starting at MYADDR. */
3242 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
3245 /* Round starting address down to longword boundary. */
3246 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
3247 /* Round ending address up; get number of longwords that makes. */
3249 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
3250 / sizeof (PTRACE_XFER_TYPE
);
3251 /* Allocate buffer of that many longwords. */
3252 register PTRACE_XFER_TYPE
*buffer
3253 = (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
3256 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
3258 /* Try using /proc. Don't bother for one word. */
3259 if (len
>= 3 * sizeof (long))
3261 /* We could keep this file open and cache it - possibly one per
3262 thread. That requires some juggling, but is even faster. */
3263 sprintf (filename
, "/proc/%d/mem", pid
);
3264 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
3268 /* If pread64 is available, use it. It's faster if the kernel
3269 supports it (only one syscall), and it's 64-bit safe even on
3270 32-bit platforms (for instance, SPARC debugging a SPARC64
3273 if (pread64 (fd
, myaddr
, len
, memaddr
) != len
)
3275 if (lseek (fd
, memaddr
, SEEK_SET
) == -1 || read (fd
, myaddr
, len
) != len
)
3287 /* Read all the longwords */
3288 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
3291 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3292 about coercing an 8 byte integer to a 4 byte pointer. */
3293 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
3294 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
, 0);
3299 /* Copy appropriate bytes out of the buffer. */
3301 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
3307 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3308 memory at MEMADDR. On failure (cannot write to the inferior)
3309 returns the value of errno. */
3312 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
3315 /* Round starting address down to longword boundary. */
3316 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
3317 /* Round ending address up; get number of longwords that makes. */
3319 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1) / sizeof (PTRACE_XFER_TYPE
);
3320 /* Allocate buffer of that many longwords. */
3321 register PTRACE_XFER_TYPE
*buffer
= (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
3322 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
3326 /* Dump up to four bytes. */
3327 unsigned int val
= * (unsigned int *) myaddr
;
3333 val
= val
& 0xffffff;
3334 fprintf (stderr
, "Writing %0*x to 0x%08lx\n", 2 * ((len
< 4) ? len
: 4),
3335 val
, (long)memaddr
);
3338 /* Fill start and end extra bytes of buffer with existing memory data. */
3341 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3342 about coercing an 8 byte integer to a 4 byte pointer. */
3343 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
3344 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
, 0);
3352 = ptrace (PTRACE_PEEKTEXT
, pid
,
3353 /* Coerce to a uintptr_t first to avoid potential gcc warning
3354 about coercing an 8 byte integer to a 4 byte pointer. */
3355 (PTRACE_ARG3_TYPE
) (uintptr_t) (addr
+ (count
- 1)
3356 * sizeof (PTRACE_XFER_TYPE
)),
3362 /* Copy data to be written over corresponding part of buffer. */
3364 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)), myaddr
, len
);
3366 /* Write the entire buffer. */
3368 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
3371 ptrace (PTRACE_POKETEXT
, pid
,
3372 /* Coerce to a uintptr_t first to avoid potential gcc warning
3373 about coercing an 8 byte integer to a 4 byte pointer. */
3374 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
,
3375 (PTRACE_ARG4_TYPE
) buffer
[i
]);
/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  */
static int linux_supports_tracefork_flag;

/* Helper functions for linux_test_for_tracefork, called via clone ().  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}

#define STACK_SIZE 4096
/* Child side of the tracefork probe: request tracing, stop, then
   spawn a grandchild so the parent can observe a fork event.  */

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else /* !__ia64__ */
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
3422 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3423 sure that we can enable the option, and that it had the desired
3427 linux_test_for_tracefork (void)
3429 int child_pid
, ret
, status
;
3431 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3432 char *stack
= xmalloc (STACK_SIZE
* 4);
3433 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3435 linux_supports_tracefork_flag
= 0;
3437 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3439 child_pid
= fork ();
3441 linux_tracefork_child (NULL
);
3443 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3445 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3447 child_pid
= __clone2 (linux_tracefork_child
, stack
, STACK_SIZE
,
3448 CLONE_VM
| SIGCHLD
, stack
+ STACK_SIZE
* 2);
3449 #else /* !__ia64__ */
3450 child_pid
= clone (linux_tracefork_child
, stack
+ STACK_SIZE
,
3451 CLONE_VM
| SIGCHLD
, stack
+ STACK_SIZE
* 2);
3452 #endif /* !__ia64__ */
3454 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3456 if (child_pid
== -1)
3457 perror_with_name ("clone");
3459 ret
= my_waitpid (child_pid
, &status
, 0);
3461 perror_with_name ("waitpid");
3462 else if (ret
!= child_pid
)
3463 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret
);
3464 if (! WIFSTOPPED (status
))
3465 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status
);
3467 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
3468 (PTRACE_ARG4_TYPE
) PTRACE_O_TRACEFORK
);
3471 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
3474 warning ("linux_test_for_tracefork: failed to kill child");
3478 ret
= my_waitpid (child_pid
, &status
, 0);
3479 if (ret
!= child_pid
)
3480 warning ("linux_test_for_tracefork: failed to wait for killed child");
3481 else if (!WIFSIGNALED (status
))
3482 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3483 "killed child", status
);
3488 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
3490 warning ("linux_test_for_tracefork: failed to resume child");
3492 ret
= my_waitpid (child_pid
, &status
, 0);
3494 if (ret
== child_pid
&& WIFSTOPPED (status
)
3495 && status
>> 16 == PTRACE_EVENT_FORK
)
3498 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
3499 if (ret
== 0 && second_pid
!= 0)
3503 linux_supports_tracefork_flag
= 1;
3504 my_waitpid (second_pid
, &second_status
, 0);
3505 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
3507 warning ("linux_test_for_tracefork: failed to kill second child");
3508 my_waitpid (second_pid
, &status
, 0);
3512 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3513 "(%d, status 0x%x)", ret
, status
);
3517 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
3519 warning ("linux_test_for_tracefork: failed to kill child");
3520 my_waitpid (child_pid
, &status
, 0);
3522 while (WIFSTOPPED (status
));
3524 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3526 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
/* Hook for symbol lookup: initialize libthread_db support once the
   inferior's symbols are available.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
3547 linux_request_interrupt (void)
3549 extern unsigned long signal_pid
;
3551 if (!ptid_equal (cont_thread
, null_ptid
)
3552 && !ptid_equal (cont_thread
, minus_one_ptid
))
3554 struct lwp_info
*lwp
;
3557 lwp
= get_thread_lwp (current_inferior
);
3558 lwpid
= lwpid_of (lwp
);
3559 kill_lwp (lwpid
, SIGINT
);
3562 kill_lwp (signal_pid
, SIGINT
);
3565 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3566 to debugger memory starting at MYADDR. */
3569 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
3571 char filename
[PATH_MAX
];
3573 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
3575 snprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
3577 fd
= open (filename
, O_RDONLY
);
3581 if (offset
!= (CORE_ADDR
) 0
3582 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
3585 n
= read (fd
, myaddr
, len
);
3592 /* These breakpoint and watchpoint related wrapper functions simply
3593 pass on the function call if the target has registered a
3594 corresponding function. */
3597 linux_insert_point (char type
, CORE_ADDR addr
, int len
)
3599 if (the_low_target
.insert_point
!= NULL
)
3600 return the_low_target
.insert_point (type
, addr
, len
);
3602 /* Unsupported (see target.h). */
3607 linux_remove_point (char type
, CORE_ADDR addr
, int len
)
3609 if (the_low_target
.remove_point
!= NULL
)
3610 return the_low_target
.remove_point (type
, addr
, len
);
3612 /* Unsupported (see target.h). */
3617 linux_stopped_by_watchpoint (void)
3619 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
3621 return lwp
->stopped_by_watchpoint
;
3625 linux_stopped_data_address (void)
3627 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
3629 return lwp
->stopped_data_address
;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR  51*4
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
#endif
/* qsort comparison function for ints: ascending order.  */

static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *)xa;
  int b = *(const int *)xb;

  return a - b;
}
/* Compact the sorted range [B, E) in place, dropping adjacent
   duplicates; returns one past the new logical end.  Assumes the
   range is non-empty (callers guarantee count > 0).  */

static int *
unique (int *b, int *e)
{
  int *d = b;
  while (++b != e)
    if (*d != *b)
      *++d = *b;
  return ++d;
}
3696 /* Given PID, iterates over all threads in that process.
3698 Information about each thread, in a format suitable for qXfer:osdata:thread
3699 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3700 initialized, and the caller is responsible for finishing and appending '\0'
3703 The list of cores that threads are running on is assigned to *CORES, if it
3704 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3705 should free *CORES. */
3708 list_threads (int pid
, struct buffer
*buffer
, char **cores
)
3712 int *core_numbers
= xmalloc (sizeof (int) * allocated
);
3716 struct stat statbuf
;
3718 sprintf (pathname
, "/proc/%d/task", pid
);
3719 if (stat (pathname
, &statbuf
) == 0 && S_ISDIR (statbuf
.st_mode
))
3721 dir
= opendir (pathname
);
3724 free (core_numbers
);
3728 while ((dp
= readdir (dir
)) != NULL
)
3730 unsigned long lwp
= strtoul (dp
->d_name
, NULL
, 10);
3734 unsigned core
= linux_core_of_thread (ptid_build (pid
, lwp
, 0));
3738 char s
[sizeof ("4294967295")];
3739 sprintf (s
, "%u", core
);
3741 if (count
== allocated
)
3744 core_numbers
= realloc (core_numbers
,
3745 sizeof (int) * allocated
);
3747 core_numbers
[count
++] = core
;
3749 buffer_xml_printf (buffer
,
3751 "<column name=\"pid\">%d</column>"
3752 "<column name=\"tid\">%s</column>"
3753 "<column name=\"core\">%s</column>"
3754 "</item>", pid
, dp
->d_name
, s
);
3759 buffer_xml_printf (buffer
,
3761 "<column name=\"pid\">%d</column>"
3762 "<column name=\"tid\">%s</column>"
3763 "</item>", pid
, dp
->d_name
);
3774 struct buffer buffer2
;
3777 qsort (core_numbers
, count
, sizeof (int), compare_ints
);
3779 /* Remove duplicates. */
3781 e
= unique (b
, core_numbers
+ count
);
3783 buffer_init (&buffer2
);
3785 for (b
= core_numbers
; b
!= e
; ++b
)
3787 char number
[sizeof ("4294967295")];
3788 sprintf (number
, "%u", *b
);
3789 buffer_xml_printf (&buffer2
, "%s%s",
3790 (b
== core_numbers
) ? "" : ",", number
);
3792 buffer_grow_str0 (&buffer2
, "");
3794 *cores
= buffer_finish (&buffer2
);
3797 free (core_numbers
);
/* Emit one qXfer:osdata:processes <item> for PID (owned by USERNAME)
   into BUFFER, including its command line and the set of cores its
   threads run on.  */

static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];

  sprintf (pathname, "/proc/%d/cmdline", pid);

  if ((f = fopen (pathname, "r")) != NULL)
    {
      size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
      if (len > 0)
	{
	  char *cores = NULL;
	  int i;

	  /* The cmdline file separates arguments with NULs; flatten
	     them into spaces for display.  */
	  for (i = 0; i < len; i++)
	    if (cmd[i] == '\0')
	      cmd[i] = ' ';
	  cmd[len] = '\0';

	  buffer_xml_printf (buffer,
			     "<item>"
			     "<column name=\"pid\">%d</column>"
			     "<column name=\"user\">%s</column>"
			     "<column name=\"command\">%s</column>",
			     pid, username, cmd);

	  /* This only collects core numbers, and does not print threads.  */
	  list_threads (pid, NULL, &cores);

	  if (cores)
	    {
	      buffer_xml_printf (buffer,
				 "<column name=\"cores\">%s</column>", cores);
	      free (cores);
	    }

	  buffer_xml_printf (buffer, "</item>");
	}
      fclose (f);
    }
}
3847 linux_qxfer_osdata (const char *annex
,
3848 unsigned char *readbuf
, unsigned const char *writebuf
,
3849 CORE_ADDR offset
, int len
)
3851 /* We make the process list snapshot when the object starts to be
3853 static const char *buf
;
3854 static long len_avail
= -1;
3855 static struct buffer buffer
;
3861 if (strcmp (annex
, "processes") == 0)
3863 else if (strcmp (annex
, "threads") == 0)
3868 if (!readbuf
|| writebuf
)
3873 if (len_avail
!= -1 && len_avail
!= 0)
3874 buffer_free (&buffer
);
3877 buffer_init (&buffer
);
3879 buffer_grow_str (&buffer
, "<osdata type=\"processes\">");
3881 buffer_grow_str (&buffer
, "<osdata type=\"threads\">");
3883 dirp
= opendir ("/proc");
3887 while ((dp
= readdir (dirp
)) != NULL
)
3889 struct stat statbuf
;
3890 char procentry
[sizeof ("/proc/4294967295")];
3892 if (!isdigit (dp
->d_name
[0])
3893 || strlen (dp
->d_name
) > sizeof ("4294967295") - 1)
3896 sprintf (procentry
, "/proc/%s", dp
->d_name
);
3897 if (stat (procentry
, &statbuf
) == 0
3898 && S_ISDIR (statbuf
.st_mode
))
3900 int pid
= (int) strtoul (dp
->d_name
, NULL
, 10);
3904 struct passwd
*entry
= getpwuid (statbuf
.st_uid
);
3905 show_process (pid
, entry
? entry
->pw_name
: "?", &buffer
);
3909 list_threads (pid
, &buffer
, NULL
);
3916 buffer_grow_str0 (&buffer
, "</osdata>\n");
3917 buf
= buffer_finish (&buffer
);
3918 len_avail
= strlen (buf
);
3921 if (offset
>= len_avail
)
3923 /* Done. Get rid of the data. */
3924 buffer_free (&buffer
);
3930 if (len
> len_avail
- offset
)
3931 len
= len_avail
- offset
;
3932 memcpy (readbuf
, buf
+ offset
, len
);
3937 /* Convert a native/host siginfo object, into/from the siginfo in the
3938 layout of the inferiors' architecture. */
3941 siginfo_fixup (struct siginfo
*siginfo
, void *inf_siginfo
, int direction
)
3945 if (the_low_target
.siginfo_fixup
!= NULL
)
3946 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
3948 /* If there was no callback, or the callback didn't do anything,
3949 then just do a straight memcpy. */
3953 memcpy (siginfo
, inf_siginfo
, sizeof (struct siginfo
));
3955 memcpy (inf_siginfo
, siginfo
, sizeof (struct siginfo
));
3960 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
3961 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
3964 struct siginfo siginfo
;
3965 char inf_siginfo
[sizeof (struct siginfo
)];
3967 if (current_inferior
== NULL
)
3970 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3973 fprintf (stderr
, "%s siginfo for lwp %d.\n",
3974 readbuf
!= NULL
? "Reading" : "Writing",
3977 if (offset
> sizeof (siginfo
))
3980 if (ptrace (PTRACE_GETSIGINFO
, pid
, 0, &siginfo
) != 0)
3983 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3984 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3985 inferior with a 64-bit GDBSERVER should look the same as debugging it
3986 with a 32-bit GDBSERVER, we need to convert it. */
3987 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
3989 if (offset
+ len
> sizeof (siginfo
))
3990 len
= sizeof (siginfo
) - offset
;
3992 if (readbuf
!= NULL
)
3993 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
3996 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
3998 /* Convert back to ptrace layout before flushing it out. */
3999 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
4001 if (ptrace (PTRACE_SETSIGINFO
, pid
, 0, &siginfo
) != 0)
4008 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4009 so we notice when children change state; as the handler for the
4010 sigsuspend in my_waitpid. */
4013 sigchld_handler (int signo
)
4015 int old_errno
= errno
;
4018 /* fprintf is not async-signal-safe, so call write directly. */
4019 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
4021 if (target_is_async_p ())
4022 async_file_mark (); /* trigger a linux_wait */
/* The ptrace-based Linux target implements non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4034 linux_async (int enable
)
4036 int previous
= (linux_event_pipe
[0] != -1);
4039 fprintf (stderr
, "linux_async (%d), previous=%d\n",
4042 if (previous
!= enable
)
4045 sigemptyset (&mask
);
4046 sigaddset (&mask
, SIGCHLD
);
4048 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4052 if (pipe (linux_event_pipe
) == -1)
4053 fatal ("creating event pipe failed.");
4055 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4056 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4058 /* Register the event loop handler. */
4059 add_file_handler (linux_event_pipe
[0],
4060 handle_target_event
, NULL
);
4062 /* Always trigger a linux_wait. */
4067 delete_file_handler (linux_event_pipe
[0]);
4069 close (linux_event_pipe
[0]);
4070 close (linux_event_pipe
[1]);
4071 linux_event_pipe
[0] = -1;
4072 linux_event_pipe
[1] = -1;
4075 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
/* Enter or leave non-stop mode per NONSTOP.  Always succeeds.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
/* The Linux target can debug multiple processes at once.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
4096 /* Enumerate spufs IDs for process PID. */
4098 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
4104 struct dirent
*entry
;
4106 sprintf (path
, "/proc/%ld/fd", pid
);
4107 dir
= opendir (path
);
4112 while ((entry
= readdir (dir
)) != NULL
)
4118 fd
= atoi (entry
->d_name
);
4122 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
4123 if (stat (path
, &st
) != 0)
4125 if (!S_ISDIR (st
.st_mode
))
4128 if (statfs (path
, &stfs
) != 0)
4130 if (stfs
.f_type
!= SPUFS_MAGIC
)
4133 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
4135 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
4145 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4146 object type, using the /proc file system. */
4148 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
4149 unsigned const char *writebuf
,
4150 CORE_ADDR offset
, int len
)
4152 long pid
= lwpid_of (get_thread_lwp (current_inferior
));
4157 if (!writebuf
&& !readbuf
)
4165 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
4168 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
4169 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
4174 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
4181 ret
= write (fd
, writebuf
, (size_t) len
);
4183 ret
= read (fd
, readbuf
, (size_t) len
);
4190 linux_core_of_thread (ptid_t ptid
)
4192 char filename
[sizeof ("/proc//task//stat")
4193 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4196 char *content
= NULL
;
4199 int content_read
= 0;
4203 sprintf (filename
, "/proc/%d/task/%ld/stat",
4204 ptid_get_pid (ptid
), ptid_get_lwp (ptid
));
4205 f
= fopen (filename
, "r");
4212 content
= realloc (content
, content_read
+ 1024);
4213 n
= fread (content
+ content_read
, 1, 1024, f
);
4217 content
[content_read
] = '\0';
4222 p
= strchr (content
, '(');
4223 p
= strchr (p
, ')') + 2; /* skip ")" and a whitespace. */
4225 p
= strtok_r (p
, " ", &ts
);
4226 for (i
= 0; i
!= 36; ++i
)
4227 p
= strtok_r (NULL
, " ", &ts
);
4229 if (sscanf (p
, "%d", &core
) == 0)
4239 linux_process_qsupported (const char *query
)
4241 if (the_low_target
.process_qsupported
!= NULL
)
4242 the_low_target
.process_qsupported (query
);
4246 linux_supports_tracepoints (void)
4248 if (*the_low_target
.supports_tracepoints
== NULL
)
4251 return (*the_low_target
.supports_tracepoints
) ();
4255 linux_read_pc (struct regcache
*regcache
)
4257 if (the_low_target
.get_pc
== NULL
)
4260 return (*the_low_target
.get_pc
) (regcache
);
4264 linux_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
4266 gdb_assert (the_low_target
.set_pc
!= NULL
);
4268 (*the_low_target
.set_pc
) (regcache
, pc
);
4272 linux_thread_stopped (struct thread_info
*thread
)
4274 return get_thread_lwp (thread
)->stopped
;
4277 /* This exposes stop-all-threads functionality to other modules. */
/* NOTE(review): the function body is not visible in this extract —
   only the declarator line below survives.  Presumably it delegates
   to the stop-all-LWPs machinery; confirm against the full file
   before relying on this.  */
4280 linux_pause_all (void)
/* The target_ops vector for the Linux ptrace backend: binds the
   generic gdbserver target interface to the implementations in this
   file.  The initializer is positional, so entry order must match
   the target_ops declaration exactly.
   NOTE(review): several initializer slots are not visible in this
   extract (elided by the source mangling); the visible entries below
   are only a subset — verify slot alignment against the full
   source before editing.  */
4285 static struct target_ops linux_target_ops
= {
4286 linux_create_inferior
,
4295 linux_fetch_registers
,
4296 linux_store_registers
,
4299 linux_look_up_symbols
,
4300 linux_request_interrupt
,
4304 linux_stopped_by_watchpoint
,
4305 linux_stopped_data_address
,
/* Entries below are conditional on the build configuration.  */
4306 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4311 #ifdef USE_THREAD_DB
4312 thread_db_get_tls_address
,
4317 hostio_last_error_from_errno
,
4320 linux_supports_non_stop
,
4322 linux_start_non_stop
,
4323 linux_supports_multi_process
,
4324 #ifdef USE_THREAD_DB
4325 thread_db_handle_monitor_command
,
4329 linux_core_of_thread
,
4330 linux_process_qsupported
,
4331 linux_supports_tracepoints
,
4334 linux_thread_stopped
,
/* Install the process-wide signal dispositions the Linux backend
   needs.  Declared with a proper (void) prototype — the old K&R
   empty parameter list () declares a function with unspecified
   parameters and defeats call-site type checking.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
4349 initialize_low (void)
4351 struct sigaction sigchld_action
;
4352 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
4353 set_target_ops (&linux_target_ops
);
4354 set_breakpoint_data (the_low_target
.breakpoint
,
4355 the_low_target
.breakpoint_len
);
4356 linux_init_signals ();
4357 linux_test_for_tracefork ();
4358 #ifdef HAVE_LINUX_REGSETS
4359 for (num_regsets
= 0; target_regsets
[num_regsets
].size
>= 0; num_regsets
++)
4361 disabled_regsets
= xmalloc (num_regsets
);
4364 sigchld_action
.sa_handler
= sigchld_handler
;
4365 sigemptyset (&sigchld_action
.sa_mask
);
4366 sigchld_action
.sa_flags
= SA_RESTART
;
4367 sigaction (SIGCHLD
, &sigchld_action
, NULL
);