X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gdb%2Fgdbserver%2Flinux-low.c;h=3e37722e784ad6d5330948eca5c455f8de0e028f;hb=219f2f2398a678322264121a25214b3046180dec;hp=8a882376e6944d11db4251c23be339960f3e97e2;hpb=cdbfd4198ec38a42766a578d4058bd752d25011c;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/gdbserver/linux-low.c b/gdb/gdbserver/linux-low.c index 8a882376e6..3e37722e78 100644 --- a/gdb/gdbserver/linux-low.c +++ b/gdb/gdbserver/linux-low.c @@ -1,6 +1,6 @@ /* Low level interface to ptrace, for the remote server for GDB. Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, - 2006, 2007, 2008, 2009 Free Software Foundation, Inc. + 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. This file is part of GDB. @@ -19,9 +19,6 @@ #include "server.h" #include "linux-low.h" -#include "ansidecl.h" /* For ATTRIBUTE_PACKED, must be bug in external.h. */ -#include "elf/common.h" -#include "elf/external.h" #include #include @@ -42,6 +39,14 @@ #include #include #include +#include +#ifndef ELFMAG0 +/* Don't include here. If it got included by gdb_proc_service.h + then ELFMAG0 will have been defined. If it didn't get included by + gdb_proc_service.h then including it will likely introduce a duplicate + definition of elf_fpregset_t. */ +#include +#endif #ifndef SPUFS_MAGIC #define SPUFS_MAGIC 0x23c9b64e @@ -89,6 +94,10 @@ #define __WALL 0x40000000 /* Wait for any child. */ #endif +#ifndef W_STOPCODE +#define W_STOPCODE(sig) ((sig) << 8 | 0x7f) +#endif + #ifdef __UCLIBC__ #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__)) #define HAS_NOMMU @@ -132,11 +141,33 @@ static void linux_resume_one_lwp (struct lwp_info *lwp, static void linux_resume (struct thread_resume *resume_info, size_t n); static void stop_all_lwps (void); static int linux_wait_for_event (ptid_t ptid, int *wstat, int options); -static int check_removed_breakpoint (struct lwp_info *event_child); static void *add_lwp (ptid_t ptid); -static int my_waitpid (int pid, int *status, int flags); static int linux_stopped_by_watchpoint (void); static void mark_lwp_dead (struct lwp_info *lwp, int wstat); +static int linux_core_of_thread (ptid_t ptid); +static void proceed_all_lwps (void); +static void unstop_all_lwps (struct lwp_info *except); +static int finish_step_over (struct lwp_info *lwp); +static CORE_ADDR get_stop_pc (struct lwp_info *lwp); +static int kill_lwp (unsigned long lwpid, int signo); + +/* True if the low target can hardware single-step. Such targets + don't need a BREAKPOINT_REINSERT_ADDR callback. */ + +static int +can_hardware_single_step (void) +{ + return (the_low_target.breakpoint_reinsert_addr == NULL); +} + +/* True if the low target supports memory breakpoints. If so, we'll + have a GET_PC implementation. */ + +static int +supports_breakpoints (void) +{ + return (the_low_target.get_pc != NULL); +} struct pending_signals { @@ -145,7 +176,8 @@ struct pending_signals struct pending_signals *prev; }; -#define PTRACE_ARG3_TYPE long +#define PTRACE_ARG3_TYPE void * +#define PTRACE_ARG4_TYPE void * #define PTRACE_XFER_TYPE long #ifdef HAVE_LINUX_REGSETS @@ -192,7 +224,7 @@ linux_child_pid_to_exec_file (int pid) /* Return non-zero if HEADER is a 64-bit ELF file. 
*/ static int -elf_64_header_p (const Elf64_External_Ehdr *header) +elf_64_header_p (const Elf64_Ehdr *header) { return (header->e_ident[EI_MAG0] == ELFMAG0 && header->e_ident[EI_MAG1] == ELFMAG1 @@ -208,7 +240,7 @@ elf_64_header_p (const Elf64_External_Ehdr *header) int elf_64_file_p (const char *file) { - Elf64_External_Ehdr header; + Elf64_Ehdr header; int fd; fd = open (file, O_RDONLY); @@ -263,15 +295,86 @@ linux_remove_process (struct process_info *process) { struct process_info_private *priv = process->private; -#ifdef USE_THREAD_DB - thread_db_free (process); -#endif - free (priv->arch_private); free (priv); remove_process (process); } +/* Wrapper function for waitpid which handles EINTR, and emulates + __WALL for systems where that is not available. */ + +static int +my_waitpid (int pid, int *status, int flags) +{ + int ret, out_errno; + + if (debug_threads) + fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags); + + if (flags & __WALL) + { + sigset_t block_mask, org_mask, wake_mask; + int wnohang; + + wnohang = (flags & WNOHANG) != 0; + flags &= ~(__WALL | __WCLONE); + flags |= WNOHANG; + + /* Block all signals while here. This avoids knowing about + LinuxThread's signals. */ + sigfillset (&block_mask); + sigprocmask (SIG_BLOCK, &block_mask, &org_mask); + + /* ... except during the sigsuspend below. */ + sigemptyset (&wake_mask); + + while (1) + { + /* Since all signals are blocked, there's no need to check + for EINTR here. */ + ret = waitpid (pid, status, flags); + out_errno = errno; + + if (ret == -1 && out_errno != ECHILD) + break; + else if (ret > 0) + break; + + if (flags & __WCLONE) + { + /* We've tried both flavors now. If WNOHANG is set, + there's nothing else to do, just bail out. */ + if (wnohang) + break; + + if (debug_threads) + fprintf (stderr, "blocking\n"); + + /* Block waiting for signals. */ + sigsuspend (&wake_mask); + } + + flags ^= __WCLONE; + } + + sigprocmask (SIG_SETMASK, &org_mask, NULL); + } + else + { + do + ret = waitpid (pid, status, flags); + while (ret == -1 && errno == EINTR); + out_errno = errno; + } + + if (debug_threads) + fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n", + pid, flags, status ? *status : -1, ret); + + errno = out_errno; + return ret; +} + /* Handle a GNU/Linux extended wait response. If we see a clone event, we need to add the new LWP to our list (and not report the trap to higher layers). */ @@ -306,7 +409,7 @@ handle_extended_wait (struct lwp_info *event_child, int wstat) warning ("wait returned unexpected status 0x%x", status); } - ptrace (PTRACE_SETOPTIONS, new_pid, 0, PTRACE_O_TRACECLONE); + ptrace (PTRACE_SETOPTIONS, new_pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE); ptid = ptid_build (pid_of (event_child), new_pid, 0); new_lwp = (struct lwp_info *) add_lwp (ptid); @@ -323,14 +426,18 @@ handle_extended_wait (struct lwp_info *event_child, int wstat) If we do get another signal, be sure not to lose it. */ if (WSTOPSIG (status) == SIGSTOP) { - if (! stopping_threads) + if (stopping_threads) + new_lwp->stop_pc = get_stop_pc (new_lwp); + else linux_resume_one_lwp (new_lwp, 0, 0, NULL); } else { new_lwp->stop_expected = 1; + if (stopping_threads) { + new_lwp->stop_pc = get_stop_pc (new_lwp); new_lwp->status_pending_p = 1; new_lwp->status_pending = status; } @@ -347,7 +454,33 @@ handle_extended_wait (struct lwp_info *event_child, int wstat) } } -/* This function should only be called if the process got a SIGTRAP. +/* Return the PC as read from the regcache of LWP, without any + adjustment. 
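   This is the raw value taken from the regcache; get_stop_pc, further
   below, additionally backs the PC up by
   the_low_target.decr_pc_after_break for SIGTRAPs that do not come
   from a single-step, a watchpoint, or an extended ptrace event.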
*/ + +static CORE_ADDR +get_pc (struct lwp_info *lwp) +{ + struct thread_info *saved_inferior; + struct regcache *regcache; + CORE_ADDR pc; + + if (the_low_target.get_pc == NULL) + return 0; + + saved_inferior = current_inferior; + current_inferior = get_lwp_thread (lwp); + + regcache = get_thread_regcache (current_inferior, 1); + pc = (*the_low_target.get_pc) (regcache); + + if (debug_threads) + fprintf (stderr, "pc is 0x%lx\n", (long) pc); + + current_inferior = saved_inferior; + return pc; +} + +/* This function should only be called if LWP got a SIGTRAP. The SIGTRAP could mean several things. On i386, where decr_pc_after_break is non-zero: @@ -370,11 +503,19 @@ handle_extended_wait (struct lwp_info *event_child, int wstat) instruction. */ static CORE_ADDR -get_stop_pc (void) +get_stop_pc (struct lwp_info *lwp) { - CORE_ADDR stop_pc = (*the_low_target.get_pc) (); + CORE_ADDR stop_pc; + + if (the_low_target.get_pc == NULL) + return 0; + + stop_pc = get_pc (lwp); - if (! get_thread_lwp (current_inferior)->stepping) + if (WSTOPSIG (lwp->last_status) == SIGTRAP + && !lwp->stepping + && !lwp->stopped_by_watchpoint + && lwp->last_status >> 16 == 0) stop_pc -= the_low_target.decr_pc_after_break; if (debug_threads) @@ -393,6 +534,8 @@ add_lwp (ptid_t ptid) lwp->head.id = ptid; + lwp->last_resume_kind = resume_continue; + if (the_low_target.new_thread != NULL) lwp->arch_private = the_low_target.new_thread (); @@ -423,7 +566,9 @@ linux_create_inferior (char *program, char **allargs) { ptrace (PTRACE_TRACEME, 0, 0, 0); +#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */ signal (__SIGRTMIN + 1, SIG_DFL); +#endif setpgid (0, 0); @@ -497,14 +642,16 @@ linux_attach_lwp_1 (unsigned long lwpid, int initial) 1) gdbserver has already attached to the process and is being notified of a new thread that is being created. - In this case we should ignore that SIGSTOP and resume the process. - This is handled below by setting stop_expected = 1. + In this case we should ignore that SIGSTOP and resume the + process. This is handled below by setting stop_expected = 1, + and the fact that add_lwp sets last_resume_kind == + resume_continue. 2) This is the first thread (the process thread), and we're attaching to it via attach_inferior. In this case we want the process thread to stop. - This is handled by having linux_attach clear stop_expected after - we return. + This is handled by having linux_attach set last_resume_kind == + resume_stop after we return. ??? If the process already has several threads we leave the other threads running. @@ -521,8 +668,7 @@ linux_attach_lwp_1 (unsigned long lwpid, int initial) because we are guaranteed that the add_lwp call above added us to the end of the list, and so the new thread has not yet reached wait_for_sigstop (but will). */ - if (! stopping_threads) - new_lwp->stop_expected = 1; + new_lwp->stop_expected = 1; } void @@ -546,7 +692,7 @@ linux_attach (unsigned long pid) process. It will be collected by wait shortly. 
*/ lwp = (struct lwp_info *) find_inferior_id (&all_lwps, ptid_build (pid, pid, 0)); - lwp->stop_expected = 0; + lwp->last_resume_kind = resume_stop; } return 0; @@ -662,6 +808,9 @@ linux_kill (int pid) lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL); } while (lwpid > 0 && WIFSTOPPED (wstat)); +#ifdef USE_THREAD_DB + thread_db_free (process, 0); +#endif delete_lwp (lwp); linux_remove_process (process); return 0; @@ -699,10 +848,6 @@ linux_detach_one_lwp (struct inferior_list_entry *entry, void *args) return 0; } - /* Make sure the process isn't stopped at a breakpoint that's - no longer there. */ - check_removed_breakpoint (lwp); - /* If this process is stopped but is expecting a SIGSTOP, then make sure we take care of that now. This isn't absolutely guaranteed to collect the SIGSTOP, but is fairly likely to. */ @@ -747,6 +892,10 @@ linux_detach (int pid) if (process == NULL) return -1; +#ifdef USE_THREAD_DB + thread_db_free (process, 1); +#endif + current_inferior = (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid); @@ -788,79 +937,13 @@ linux_thread_alive (ptid_t ptid) return 0; } -/* Return nonzero if this process stopped at a breakpoint which - no longer appears to be inserted. Also adjust the PC - appropriately to resume where the breakpoint used to be. */ -static int -check_removed_breakpoint (struct lwp_info *event_child) -{ - CORE_ADDR stop_pc; - struct thread_info *saved_inferior; - - if (event_child->pending_is_breakpoint == 0) - return 0; - - if (debug_threads) - fprintf (stderr, "Checking for breakpoint in lwp %ld.\n", - lwpid_of (event_child)); - - saved_inferior = current_inferior; - current_inferior = get_lwp_thread (event_child); - - stop_pc = get_stop_pc (); - - /* If the PC has changed since we stopped, then we shouldn't do - anything. This happens if, for instance, GDB handled the - decr_pc_after_break subtraction itself. */ - if (stop_pc != event_child->pending_stop_pc) - { - if (debug_threads) - fprintf (stderr, "Ignoring, PC was changed. Old PC was 0x%08llx\n", - event_child->pending_stop_pc); - - event_child->pending_is_breakpoint = 0; - current_inferior = saved_inferior; - return 0; - } - - /* If the breakpoint is still there, we will report hitting it. */ - if ((*the_low_target.breakpoint_at) (stop_pc)) - { - if (debug_threads) - fprintf (stderr, "Ignoring, breakpoint is still present.\n"); - current_inferior = saved_inferior; - return 0; - } - - if (debug_threads) - fprintf (stderr, "Removed breakpoint.\n"); - - /* For decr_pc_after_break targets, here is where we perform the - decrement. We go immediately from this function to resuming, - and can not safely call get_stop_pc () again. */ - if (the_low_target.set_pc != NULL) - { - if (debug_threads) - fprintf (stderr, "Set pc to 0x%lx\n", (long) stop_pc); - (*the_low_target.set_pc) (stop_pc); - } - - /* We consumed the pending SIGTRAP. */ - event_child->pending_is_breakpoint = 0; - event_child->status_pending_p = 0; - event_child->status_pending = 0; - - current_inferior = saved_inferior; - return 1; -} - -/* Return 1 if this lwp has an interesting status pending. This - function may silently resume an inferior lwp. */ +/* Return 1 if this lwp has an interesting status pending. 
*/ static int -status_pending_p (struct inferior_list_entry *entry, void *arg) +status_pending_p_callback (struct inferior_list_entry *entry, void *arg) { struct lwp_info *lwp = (struct lwp_info *) entry; ptid_t ptid = * (ptid_t *) arg; + struct thread_info *thread = get_lwp_thread (lwp); /* Check if we're only interested in events from a specific process or its lwps. */ @@ -868,20 +951,15 @@ status_pending_p (struct inferior_list_entry *entry, void *arg) && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id)) return 0; - if (lwp->status_pending_p && !lwp->suspended) - if (check_removed_breakpoint (lwp)) - { - /* This thread was stopped at a breakpoint, and the breakpoint - is now gone. We were told to continue (or step...) all threads, - so GDB isn't trying to single-step past this breakpoint. - So instead of reporting the old SIGTRAP, pretend we got to - the breakpoint just after it was removed instead of just - before; resume the process. */ - linux_resume_one_lwp (lwp, 0, 0, NULL); - return 0; - } + thread = get_lwp_thread (lwp); - return (lwp->status_pending_p && !lwp->suspended); + /* If we got a `vCont;t', but we haven't reported a stop yet, do + report any status pending the LWP may have. */ + if (lwp->last_resume_kind == resume_stop + && thread->last_status.kind == TARGET_WAITKIND_STOPPED) + return 0; + + return lwp->status_pending_p; } static int @@ -953,7 +1031,6 @@ retry: goto retry; child->stopped = 1; - child->pending_is_breakpoint = 0; child->last_status = *wstatp; @@ -970,16 +1047,69 @@ retry: new_inferior = 0; } + /* Fetch the possibly triggered data watchpoint info and store it in + CHILD. + + On some archs, like x86, that use debug registers to set + watchpoints, it's possible that the way to know which watched + address trapped, is to check the register that is used to select + which address to watch. Problem is, between setting the + watchpoint and reading back which data address trapped, the user + may change the set of watchpoints, and, as a consequence, GDB + changes the debug registers in the inferior. To avoid reading + back a stale stopped-data-address when that happens, we cache in + LP the fact that a watchpoint trapped, and the corresponding data + address, as soon as we see CHILD stop with a SIGTRAP. If GDB + changes the debug registers meanwhile, we have the cached data we + can rely on. */ + + if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP) + { + if (the_low_target.stopped_by_watchpoint == NULL) + { + child->stopped_by_watchpoint = 0; + } + else + { + struct thread_info *saved_inferior; + + saved_inferior = current_inferior; + current_inferior = get_lwp_thread (child); + + child->stopped_by_watchpoint + = the_low_target.stopped_by_watchpoint (); + + if (child->stopped_by_watchpoint) + { + if (the_low_target.stopped_data_address != NULL) + child->stopped_data_address + = the_low_target.stopped_data_address (); + else + child->stopped_data_address = 0; + } + + current_inferior = saved_inferior; + } + } + + /* Store the STOP_PC, with adjustment applied. This depends on the + architecture being defined already (so that CHILD has a valid + regcache), and on LAST_STATUS being set (to check for SIGTRAP or + not). 
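     As an illustrative sketch only (the authoritative logic is in
     get_stop_pc above): on a target such as i386, where
     decr_pc_after_break is non-zero, a software breakpoint trap
     reports the address just past the breakpoint instruction, so the
     adjustment amounts to

       stop_pc = get_pc (child) - the_low_target.decr_pc_after_break;

     while single-step traps, watchpoint traps, non-SIGTRAP stops, and
     extended (PTRACE_EVENT_*) stops are left unadjusted.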
*/ + if (WIFSTOPPED (*wstatp)) + child->stop_pc = get_stop_pc (child); + if (debug_threads && WIFSTOPPED (*wstatp) && the_low_target.get_pc != NULL) { struct thread_info *saved_inferior = current_inferior; + struct regcache *regcache; CORE_ADDR pc; - current_inferior = (struct thread_info *) - find_inferior_id (&all_threads, child->head.id); - pc = (*the_low_target.get_pc) (); + current_inferior = get_lwp_thread (child); + regcache = get_thread_regcache (current_inferior, 1); + pc = (*the_low_target.get_pc) (regcache); fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc); current_inferior = saved_inferior; } @@ -987,6 +1117,92 @@ retry: return child; } +/* This function should only be called if the LWP got a SIGTRAP. + + Handle any tracepoint steps or hits. Return true if a tracepoint + event was handled, 0 otherwise. */ + +static int +handle_tracepoints (struct lwp_info *lwp) +{ + struct thread_info *tinfo = get_lwp_thread (lwp); + int tpoint_related_event = 0; + + /* And we need to be sure that any all-threads-stopping doesn't try + to move threads out of the jump pads, as it could deadlock the + inferior (LWP could be in the jump pad, maybe even holding the + lock.) */ + + /* Do any necessary step collect actions. */ + tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc); + + /* See if we just hit a tracepoint and do its main collect + actions. */ + tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc); + + if (tpoint_related_event) + { + if (debug_threads) + fprintf (stderr, "got a tracepoint event\n"); + return 1; + } + + return 0; +} + +/* Arrange for a breakpoint to be hit again later. We don't keep the + SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We + will handle the current event, eventually we will resume this LWP, + and this breakpoint will trap again. */ + +static int +cancel_breakpoint (struct lwp_info *lwp) +{ + struct thread_info *saved_inferior; + + /* There's nothing to do if we don't support breakpoints. */ + if (!supports_breakpoints ()) + return 0; + + /* breakpoint_at reads from current inferior. */ + saved_inferior = current_inferior; + current_inferior = get_lwp_thread (lwp); + + if ((*the_low_target.breakpoint_at) (lwp->stop_pc)) + { + if (debug_threads) + fprintf (stderr, + "CB: Push back breakpoint for %s\n", + target_pid_to_str (ptid_of (lwp))); + + /* Back up the PC if necessary. */ + if (the_low_target.decr_pc_after_break) + { + struct regcache *regcache + = get_thread_regcache (current_inferior, 1); + (*the_low_target.set_pc) (regcache, lwp->stop_pc); + } + + current_inferior = saved_inferior; + return 1; + } + else + { + if (debug_threads) + fprintf (stderr, + "CB: No breakpoint found at %s for [%s]\n", + paddress (lwp->stop_pc), + target_pid_to_str (ptid_of (lwp))); + } + + current_inferior = saved_inferior; + return 0; +} + +/* When the event-loop is doing a step-over, this points at the thread + being stepped. */ +ptid_t step_over_bkpt; + /* Wait for an event from child PID. If PID is -1, wait for any child. Store the stop status through the status pointer WSTAT. OPTIONS is passed to the waitpid call. Return 0 if no child stop @@ -996,29 +1212,26 @@ retry: static int linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) { - CORE_ADDR stop_pc; - struct lwp_info *event_child = NULL; - int bp_status; - struct lwp_info *requested_child = NULL; + struct lwp_info *event_child, *requested_child; + + event_child = NULL; + requested_child = NULL; /* Check for a lwp with a pending status. 
*/ - /* It is possible that the user changed the pending task's registers since - it stopped. We correctly handle the change of PC if we hit a breakpoint - (in check_removed_breakpoint); signals should be reported anyway. */ if (ptid_equal (ptid, minus_one_ptid) || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid)) { event_child = (struct lwp_info *) - find_inferior (&all_lwps, status_pending_p, &ptid); + find_inferior (&all_lwps, status_pending_p_callback, &ptid); if (debug_threads && event_child) fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child)); } else { requested_child = find_lwp_pid (ptid); - if (requested_child->status_pending_p - && !check_removed_breakpoint (requested_child)) + + if (requested_child->status_pending_p) event_child = requested_child; } @@ -1043,7 +1256,11 @@ linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) event_child = linux_wait_for_lwp (ptid, wstat, options); if ((options & WNOHANG) && event_child == NULL) - return 0; + { + if (debug_threads) + fprintf (stderr, "WNOHANG set, no event found\n"); + return 0; + } if (event_child == NULL) error ("event from unknown child"); @@ -1065,8 +1282,6 @@ linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) return lwpid_of (event_child); } - delete_lwp (event_child); - if (!non_stop) { current_inferior = (struct thread_info *) all_threads.head; @@ -1084,7 +1299,18 @@ linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) /* If we were waiting for this particular child to do something... well, it did something. */ if (requested_child != NULL) - return lwpid_of (event_child); + { + int lwpid = lwpid_of (event_child); + + /* Cancel the step-over operation --- the thread that + started it is gone. */ + if (finish_step_over (event_child)) + unstop_all_lwps (event_child); + delete_lwp (event_child); + return lwpid; + } + + delete_lwp (event_child); /* Wait for a more interesting event. */ continue; @@ -1093,21 +1319,10 @@ linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) if (event_child->must_set_ptrace_flags) { ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child), - 0, PTRACE_O_TRACECLONE); + 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE); event_child->must_set_ptrace_flags = 0; } - if (WIFSTOPPED (*wstat) - && WSTOPSIG (*wstat) == SIGSTOP - && event_child->stop_expected) - { - if (debug_threads) - fprintf (stderr, "Expected stop.\n"); - event_child->stop_expected = 0; - linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL); - continue; - } - if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP && *wstat >> 16 != 0) { @@ -1127,14 +1342,15 @@ linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) if (WIFSTOPPED (*wstat) && !event_child->stepping && ( -#ifdef USE_THREAD_DB +#if defined (USE_THREAD_DB) && defined (__SIGRTMIN) (current_process ()->private->thread_db != NULL && (WSTOPSIG (*wstat) == __SIGRTMIN || WSTOPSIG (*wstat) == __SIGRTMIN + 1)) || #endif (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))] - && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads)))) + && !(WSTOPSIG (*wstat) == SIGSTOP + && event_child->stop_expected)))) { siginfo_t info, *info_p; @@ -1146,117 +1362,30 @@ linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options) info_p = &info; else info_p = NULL; - linux_resume_one_lwp (event_child, - event_child->stepping, + linux_resume_one_lwp (event_child, event_child->stepping, WSTOPSIG (*wstat), info_p); continue; } - /* If this event was not handled above, and is not a SIGTRAP, report - it. 
*/ - if (!WIFSTOPPED (*wstat) || WSTOPSIG (*wstat) != SIGTRAP) - return lwpid_of (event_child); + if (WIFSTOPPED (*wstat) + && WSTOPSIG (*wstat) == SIGSTOP + && event_child->stop_expected) + { + int should_stop; - /* If this target does not support breakpoints, we simply report the - SIGTRAP; it's of no concern to us. */ - if (the_low_target.get_pc == NULL) - return lwpid_of (event_child); - - stop_pc = get_stop_pc (); - - /* bp_reinsert will only be set if we were single-stepping. - Notice that we will resume the process after hitting - a gdbserver breakpoint; single-stepping to/over one - is not supported (yet). */ - if (event_child->bp_reinsert != 0) - { if (debug_threads) - fprintf (stderr, "Reinserted breakpoint.\n"); - reinsert_breakpoint (event_child->bp_reinsert); - event_child->bp_reinsert = 0; - - /* Clear the single-stepping flag and SIGTRAP as we resume. */ - linux_resume_one_lwp (event_child, 0, 0, NULL); - continue; - } - - bp_status = check_breakpoints (stop_pc); - - if (bp_status != 0) - { - if (debug_threads) - fprintf (stderr, "Hit a gdbserver breakpoint.\n"); + fprintf (stderr, "Expected stop.\n"); + event_child->stop_expected = 0; - /* We hit one of our own breakpoints. We mark it as a pending - breakpoint, so that check_removed_breakpoint () will do the PC - adjustment for us at the appropriate time. */ - event_child->pending_is_breakpoint = 1; - event_child->pending_stop_pc = stop_pc; - - /* We may need to put the breakpoint back. We continue in the event - loop instead of simply replacing the breakpoint right away, - in order to not lose signals sent to the thread that hit the - breakpoint. Unfortunately this increases the window where another - thread could sneak past the removed breakpoint. For the current - use of server-side breakpoints (thread creation) this is - acceptable; but it needs to be considered before this breakpoint - mechanism can be used in more general ways. For some breakpoints - it may be necessary to stop all other threads, but that should - be avoided where possible. - - If breakpoint_reinsert_addr is NULL, that means that we can - use PTRACE_SINGLESTEP on this platform. Uninsert the breakpoint, - mark it for reinsertion, and single-step. - - Otherwise, call the target function to figure out where we need - our temporary breakpoint, create it, and continue executing this - process. */ + should_stop = (event_child->last_resume_kind == resume_stop + || stopping_threads); - /* NOTE: we're lifting breakpoints in non-stop mode. This - is currently only used for thread event breakpoints, so - it isn't that bad as long as we have PTRACE_EVENT_CLONE - events. */ - if (bp_status == 2) - /* No need to reinsert. */ - linux_resume_one_lwp (event_child, 0, 0, NULL); - else if (the_low_target.breakpoint_reinsert_addr == NULL) - { - event_child->bp_reinsert = stop_pc; - uninsert_breakpoint (stop_pc); - linux_resume_one_lwp (event_child, 1, 0, NULL); - } - else + if (!should_stop) { - reinsert_breakpoint_by_bp - (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ()); - linux_resume_one_lwp (event_child, 0, 0, NULL); + linux_resume_one_lwp (event_child, + event_child->stepping, 0, NULL); + continue; } - - continue; - } - - if (debug_threads) - fprintf (stderr, "Hit a non-gdbserver breakpoint.\n"); - - /* If we were single-stepping, we definitely want to report the - SIGTRAP. Although the single-step operation has completed, - do not clear clear the stepping flag yet; we need to check it - in wait_for_sigstop. 
*/ - if (event_child->stepping) - return lwpid_of (event_child); - - /* A SIGTRAP that we can't explain. It may have been a breakpoint. - Check if it is a breakpoint, and if so mark the process information - accordingly. This will handle both the necessary fiddling with the - PC on decr_pc_after_break targets and suppressing extra threads - hitting a breakpoint if two hit it at once and then GDB removes it - after the first is reported. Arguably it would be better to report - multiple threads hitting breakpoints simultaneously, but the current - remote protocol does not allow this. */ - if ((*the_low_target.breakpoint_at) (stop_pc)) - { - event_child->pending_is_breakpoint = 1; - event_child->pending_stop_pc = stop_pc; } return lwpid_of (event_child); @@ -1306,6 +1435,179 @@ linux_wait_for_event (ptid_t ptid, int *wstat, int options) } } + +/* Count the LWP's that have had events. */ + +static int +count_events_callback (struct inferior_list_entry *entry, void *data) +{ + struct lwp_info *lp = (struct lwp_info *) entry; + int *count = data; + + gdb_assert (count != NULL); + + /* Count only resumed LWPs that have a SIGTRAP event pending that + should be reported to GDB. */ + if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE + && lp->last_resume_kind != resume_stop + && lp->status_pending_p + && WIFSTOPPED (lp->status_pending) + && WSTOPSIG (lp->status_pending) == SIGTRAP + && !breakpoint_inserted_here (lp->stop_pc)) + (*count)++; + + return 0; +} + +/* Select the LWP (if any) that is currently being single-stepped. */ + +static int +select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data) +{ + struct lwp_info *lp = (struct lwp_info *) entry; + + if (get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE + && lp->last_resume_kind == resume_step + && lp->status_pending_p) + return 1; + else + return 0; +} + +/* Select the Nth LWP that has had a SIGTRAP event that should be + reported to GDB. */ + +static int +select_event_lwp_callback (struct inferior_list_entry *entry, void *data) +{ + struct lwp_info *lp = (struct lwp_info *) entry; + int *selector = data; + + gdb_assert (selector != NULL); + + /* Select only resumed LWPs that have a SIGTRAP event pending. */ + if (lp->last_resume_kind != resume_stop + && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE + && lp->status_pending_p + && WIFSTOPPED (lp->status_pending) + && WSTOPSIG (lp->status_pending) == SIGTRAP + && !breakpoint_inserted_here (lp->stop_pc)) + if ((*selector)-- == 0) + return 1; + + return 0; +} + +static int +cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data) +{ + struct lwp_info *lp = (struct lwp_info *) entry; + struct lwp_info *event_lp = data; + + /* Leave the LWP that has been elected to receive a SIGTRAP alone. */ + if (lp == event_lp) + return 0; + + /* If a LWP other than the LWP that we're reporting an event for has + hit a GDB breakpoint (as opposed to some random trap signal), + then just arrange for it to hit it again later. We don't keep + the SIGTRAP status and don't forward the SIGTRAP signal to the + LWP. We will handle the current event, eventually we will resume + all LWPs, and this one will get its breakpoint trap again. + + If we do not do this, then we run the risk that the user will + delete or disable the breakpoint, but the LWP will have already + tripped on it. 
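     Concretely: if LWPs A and B both report a SIGTRAP at the same GDB
     breakpoint, only one of them (see select_event_lwp) is reported;
     for the other, this callback calls cancel_breakpoint, which throws
     away the pending SIGTRAP and, on decr_pc_after_break targets,
     rewinds the PC back to the breakpoint address, so that LWP simply
     traps again the next time it is resumed with the breakpoint still
     inserted.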
*/ + + if (lp->last_resume_kind != resume_stop + && get_lwp_thread (lp)->last_status.kind == TARGET_WAITKIND_IGNORE + && lp->status_pending_p + && WIFSTOPPED (lp->status_pending) + && WSTOPSIG (lp->status_pending) == SIGTRAP + && !lp->stepping + && !lp->stopped_by_watchpoint + && cancel_breakpoint (lp)) + /* Throw away the SIGTRAP. */ + lp->status_pending_p = 0; + + return 0; +} + +/* Select one LWP out of those that have events pending. */ + +static void +select_event_lwp (struct lwp_info **orig_lp) +{ + int num_events = 0; + int random_selector; + struct lwp_info *event_lp; + + /* Give preference to any LWP that is being single-stepped. */ + event_lp + = (struct lwp_info *) find_inferior (&all_lwps, + select_singlestep_lwp_callback, NULL); + if (event_lp != NULL) + { + if (debug_threads) + fprintf (stderr, + "SEL: Select single-step %s\n", + target_pid_to_str (ptid_of (event_lp))); + } + else + { + /* No single-stepping LWP. Select one at random, out of those + which have had SIGTRAP events. */ + + /* First see how many SIGTRAP events we have. */ + find_inferior (&all_lwps, count_events_callback, &num_events); + + /* Now randomly pick a LWP out of those that have had a SIGTRAP. */ + random_selector = (int) + ((num_events * (double) rand ()) / (RAND_MAX + 1.0)); + + if (debug_threads && num_events > 1) + fprintf (stderr, + "SEL: Found %d SIGTRAP events, selecting #%d\n", + num_events, random_selector); + + event_lp = (struct lwp_info *) find_inferior (&all_lwps, + select_event_lwp_callback, + &random_selector); + } + + if (event_lp != NULL) + { + /* Switch the event LWP. */ + *orig_lp = event_lp; + } +} + +/* Set this inferior LWP's state as "want-stopped". We won't resume + this LWP until the client gives us another action for it. */ + +static void +gdb_wants_lwp_stopped (struct inferior_list_entry *entry) +{ + struct lwp_info *lwp = (struct lwp_info *) entry; + struct thread_info *thread = get_lwp_thread (lwp); + + /* Most threads are stopped implicitly (all-stop); tag that with + signal 0. The thread being explicitly reported stopped to the + client, gets it's status fixed up afterwards. */ + thread->last_status.kind = TARGET_WAITKIND_STOPPED; + thread->last_status.value.sig = TARGET_SIGNAL_0; + + lwp->last_resume_kind = resume_stop; +} + +/* Set all LWP's states as "want-stopped". */ + +static void +gdb_wants_all_stopped (void) +{ + for_each_inferior (&all_lwps, gdb_wants_lwp_stopped); +} + /* Wait for process, returns status. */ static ptid_t @@ -1313,10 +1615,14 @@ linux_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus, int target_options) { int w; - struct thread_info *thread = NULL; - struct lwp_info *lwp = NULL; + struct lwp_info *event_child; int options; int pid; + int step_over_finished; + int bp_explains_trap; + int maybe_internal_trap; + int report_to_gdb; + int trace_event; /* Translate generic target options into linux options. 
*/ options = __WALL; @@ -1336,6 +1642,8 @@ retry: && !ptid_equal (cont_thread, null_ptid) && !ptid_equal (cont_thread, minus_one_ptid)) { + struct thread_info *thread; + thread = (struct thread_info *) find_inferior_id (&all_threads, cont_thread); @@ -1352,11 +1660,20 @@ retry: ptid = cont_thread; } - pid = linux_wait_for_event (ptid, &w, options); + if (ptid_equal (step_over_bkpt, null_ptid)) + pid = linux_wait_for_event (ptid, &w, options); + else + { + if (debug_threads) + fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n", + target_pid_to_str (step_over_bkpt)); + pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG); + } + if (pid == 0) /* only if TARGET_WNOHANG */ return null_ptid; - lwp = get_thread_lwp (current_inferior); + event_child = get_thread_lwp (current_inferior); /* If we are waiting for a particular child, and it exited, linux_wait_for_event will return its exit status. Similarly if @@ -1374,10 +1691,13 @@ retry: { if (WIFEXITED (w) || WIFSIGNALED (w)) { - int pid = pid_of (lwp); + int pid = pid_of (event_child); struct process_info *process = find_process_pid (pid); - delete_lwp (lwp); +#ifdef USE_THREAD_DB + thread_db_free (process, 0); +#endif + delete_lwp (event_child); linux_remove_process (process); current_inferior = NULL; @@ -1409,26 +1729,183 @@ retry: goto retry; } - /* In all-stop, stop all threads. Be careful to only do this if - we're about to report an event to GDB. */ + /* If this event was not handled before, and is not a SIGTRAP, we + report it. SIGILL and SIGSEGV are also treated as traps in case + a breakpoint is inserted at the current PC. If this target does + not support internal breakpoints at all, we also report the + SIGTRAP without further processing; it's of no concern to us. */ + maybe_internal_trap + = (supports_breakpoints () + && (WSTOPSIG (w) == SIGTRAP + || ((WSTOPSIG (w) == SIGILL + || WSTOPSIG (w) == SIGSEGV) + && (*the_low_target.breakpoint_at) (event_child->stop_pc)))); + + if (maybe_internal_trap) + { + /* Handle anything that requires bookkeeping before deciding to + report the event or continue waiting. */ + + /* First check if we can explain the SIGTRAP with an internal + breakpoint, or if we should possibly report the event to GDB. + Do this before anything that may remove or insert a + breakpoint. */ + bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc); + + /* We have a SIGTRAP, possibly a step-over dance has just + finished. If so, tweak the state machine accordingly, + reinsert breakpoints and delete any reinsert (software + single-step) breakpoints. */ + step_over_finished = finish_step_over (event_child); + + /* Now invoke the callbacks of any internal breakpoints there. */ + check_breakpoints (event_child->stop_pc); + + /* Handle tracepoint data collecting. This may overflow the + trace buffer, and cause a tracing stop, removing + breakpoints. */ + trace_event = handle_tracepoints (event_child); + + if (bp_explains_trap) + { + /* If we stepped or ran into an internal breakpoint, we've + already handled it. So next time we resume (from this + PC), we should step over it. */ + if (debug_threads) + fprintf (stderr, "Hit a gdbserver breakpoint.\n"); + + if (breakpoint_here (event_child->stop_pc)) + event_child->need_step_over = 1; + } + } + else + { + /* We have some other signal, possibly a step-over dance was in + progress, and it should be cancelled too. */ + step_over_finished = finish_step_over (event_child); + + trace_event = 0; + } + + /* We have all the data we need. 
Either report the event to GDB, or + resume threads and keep waiting for more. */ + + /* Check If GDB would be interested in this event. If GDB wanted + this thread to single step, we always want to report the SIGTRAP, + and let GDB handle it. Watchpoints should always be reported. + So should signals we can't explain. A SIGTRAP we can't explain + could be a GDB breakpoint --- we may or not support Z0 + breakpoints. If we do, we're be able to handle GDB breakpoints + on top of internal breakpoints, by handling the internal + breakpoint and still reporting the event to GDB. If we don't, + we're out of luck, GDB won't see the breakpoint hit. */ + report_to_gdb = (!maybe_internal_trap + || event_child->last_resume_kind == resume_step + || event_child->stopped_by_watchpoint + || (!step_over_finished && !bp_explains_trap && !trace_event) + || gdb_breakpoint_here (event_child->stop_pc)); + + /* We found no reason GDB would want us to stop. We either hit one + of our own breakpoints, or finished an internal step GDB + shouldn't know about. */ + if (!report_to_gdb) + { + if (debug_threads) + { + if (bp_explains_trap) + fprintf (stderr, "Hit a gdbserver breakpoint.\n"); + if (step_over_finished) + fprintf (stderr, "Step-over finished.\n"); + if (trace_event) + fprintf (stderr, "Tracepoint event.\n"); + } + + /* We're not reporting this breakpoint to GDB, so apply the + decr_pc_after_break adjustment to the inferior's regcache + ourselves. */ + + if (the_low_target.set_pc != NULL) + { + struct regcache *regcache + = get_thread_regcache (get_lwp_thread (event_child), 1); + (*the_low_target.set_pc) (regcache, event_child->stop_pc); + } + + /* We've finished stepping over a breakpoint. We've stopped all + LWPs momentarily except the stepping one. This is where we + resume them all again. We're going to keep waiting, so use + proceed, which handles stepping over the next breakpoint. */ + if (debug_threads) + fprintf (stderr, "proceeding all threads.\n"); + proceed_all_lwps (); + goto retry; + } + + if (debug_threads) + { + if (event_child->last_resume_kind == resume_step) + fprintf (stderr, "GDB wanted to single-step, reporting event.\n"); + if (event_child->stopped_by_watchpoint) + fprintf (stderr, "Stopped by watchpoint.\n"); + if (gdb_breakpoint_here (event_child->stop_pc)) + fprintf (stderr, "Stopped by GDB breakpoint.\n"); + if (debug_threads) + fprintf (stderr, "Hit a non-gdbserver trap event.\n"); + } + + /* Alright, we're going to report a stop. */ + if (!non_stop) - stop_all_lwps (); + { + /* In all-stop, stop all threads. */ + stop_all_lwps (); + + /* If we're not waiting for a specific LWP, choose an event LWP + from among those that have had events. Giving equal priority + to all LWPs that have had events helps prevent + starvation. */ + if (ptid_equal (ptid, minus_one_ptid)) + { + event_child->status_pending_p = 1; + event_child->status_pending = w; + + select_event_lwp (&event_child); + + event_child->status_pending_p = 0; + w = event_child->status_pending; + } + + /* Now that we've selected our final event LWP, cancel any + breakpoints in other LWPs that have hit a GDB breakpoint. + See the comment in cancel_breakpoints_callback to find out + why. */ + find_inferior (&all_lwps, cancel_breakpoints_callback, event_child); + } + else + { + /* If we just finished a step-over, then all threads had been + momentarily paused. In all-stop, that's fine, we want + threads stopped by now anyway. In non-stop, we need to + re-resume threads that GDB wanted to be running. 
*/ + if (step_over_finished) + unstop_all_lwps (event_child); + } ourstatus->kind = TARGET_WAITKIND_STOPPED; - if (lwp->suspended && WSTOPSIG (w) == SIGSTOP) + /* Do this before the gdb_wants_all_stopped calls below, since they + always set last_resume_kind to resume_stop. */ + if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) == SIGSTOP) { /* A thread that has been requested to stop by GDB with vCont;t, and it stopped cleanly, so report as SIG0. The use of SIGSTOP is an implementation detail. */ ourstatus->value.sig = TARGET_SIGNAL_0; } - else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP) + else if (event_child->last_resume_kind == resume_stop && WSTOPSIG (w) != SIGSTOP) { /* A thread that has been requested to stop by GDB with vCont;t, - but, it stopped for other reasons. Set stop_expected so the - pending SIGSTOP is ignored and the LWP is resumed. */ - lwp->stop_expected = 1; + but, it stopped for other reasons. */ ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w)); } else @@ -1436,13 +1913,30 @@ retry: ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w)); } + gdb_assert (ptid_equal (step_over_bkpt, null_ptid)); + + if (!non_stop) + { + /* From GDB's perspective, all-stop mode always stops all + threads implicitly. Tag all threads as "want-stopped". */ + gdb_wants_all_stopped (); + } + else + { + /* We're reporting this LWP as stopped. Update it's + "want-stopped" state to what the client wants, until it gets + a new resume action. */ + gdb_wants_lwp_stopped (&event_child->head); + } + if (debug_threads) fprintf (stderr, "linux_wait ret = %s, %d, %d\n", - target_pid_to_str (lwp->head.id), + target_pid_to_str (ptid_of (event_child)), ourstatus->kind, ourstatus->value.sig); - return lwp->head.id; + get_lwp_thread (event_child)->last_status = *ourstatus; + return ptid_of (event_child); } /* Get rid of any pending event in the pipe. */ @@ -1498,25 +1992,29 @@ linux_wait (ptid_t ptid, return event_ptid; } -/* Send a signal to an LWP. For LinuxThreads, kill is enough; however, if - thread groups are in use, we need to use tkill. */ +/* Send a signal to an LWP. */ static int kill_lwp (unsigned long lwpid, int signo) { - static int tkill_failed; + /* Use tkill, if possible, in case we are using nptl threads. If tkill + fails, then we are not using nptl threads and we should be using kill. */ - errno = 0; +#ifdef __NR_tkill + { + static int tkill_failed; -#ifdef SYS_tkill - if (!tkill_failed) - { - int ret = syscall (SYS_tkill, lwpid, signo); - if (errno != ENOSYS) - return ret; - errno = 0; - tkill_failed = 1; - } + if (!tkill_failed) + { + int ret; + + errno = 0; + ret = syscall (__NR_tkill, lwpid, signo); + if (errno != ENOSYS) + return ret; + tkill_failed = 1; + } + } #endif return kill (lwpid, signo); @@ -1540,16 +2038,13 @@ send_sigstop (struct inferior_list_entry *entry) if (debug_threads) fprintf (stderr, "Have pending sigstop for lwp %d\n", pid); - /* We clear the stop_expected flag so that wait_for_sigstop - will receive the SIGSTOP event (instead of silently resuming and - waiting again). It'll be reset below. */ - lwp->stop_expected = 0; return; } if (debug_threads) fprintf (stderr, "Sending sigstop to lwp %d\n", pid); + lwp->stop_expected = 1; kill_lwp (pid, SIGSTOP); } @@ -1563,10 +2058,6 @@ mark_lwp_dead (struct lwp_info *lwp, int wstat) lwp->status_pending_p = 1; lwp->status_pending = wstat; - /* So that check_removed_breakpoint doesn't try to figure out if - this is stopped at a breakpoint. 
*/ - lwp->pending_is_breakpoint = 0; - /* Prevent trying to stop it. */ lwp->stopped = 1; @@ -1582,9 +2073,15 @@ wait_for_sigstop (struct inferior_list_entry *entry) int wstat; ptid_t saved_tid; ptid_t ptid; + int pid; if (lwp->stopped) - return; + { + if (debug_threads) + fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n", + lwpid_of (lwp)); + return; + } saved_inferior = current_inferior; if (saved_inferior != NULL) @@ -1594,48 +2091,47 @@ wait_for_sigstop (struct inferior_list_entry *entry) ptid = lwp->head.id; - linux_wait_for_event (ptid, &wstat, __WALL); + if (debug_threads) + fprintf (stderr, "wait_for_sigstop: pulling one event\n"); + + pid = linux_wait_for_event (ptid, &wstat, __WALL); /* If we stopped with a non-SIGSTOP signal, save it for later and record the pending SIGSTOP. If the process exited, just return. */ - if (WIFSTOPPED (wstat) - && WSTOPSIG (wstat) != SIGSTOP) + if (WIFSTOPPED (wstat)) { if (debug_threads) - fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n", - lwpid_of (lwp), wstat); - - /* Do not leave a pending single-step finish to be reported to - the client. The client will give us a new action for this - thread, possibly a continue request --- otherwise, the client - would consider this pending SIGTRAP reported later a spurious - signal. */ - if (WSTOPSIG (wstat) == SIGTRAP - && lwp->stepping - && !linux_stopped_by_watchpoint ()) + fprintf (stderr, "LWP %ld stopped with signal %d\n", + lwpid_of (lwp), WSTOPSIG (wstat)); + + if (WSTOPSIG (wstat) != SIGSTOP) { if (debug_threads) - fprintf (stderr, " single-step SIGTRAP ignored\n"); - } - else - { + fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n", + lwpid_of (lwp), wstat); + lwp->status_pending_p = 1; lwp->status_pending = wstat; } - lwp->stop_expected = 1; } - else if (!WIFSTOPPED (wstat)) + else { if (debug_threads) - fprintf (stderr, "Process %ld exited while stopping LWPs\n", - lwpid_of (lwp)); + fprintf (stderr, "Process %d exited while stopping LWPs\n", pid); - /* Leave this status pending for the next time we're able to - report it. In the mean time, we'll report this lwp as dead - to GDB, so GDB doesn't try to read registers and memory from - it. */ - mark_lwp_dead (lwp, wstat); + lwp = find_lwp_pid (pid_to_ptid (pid)); + if (lwp) + { + /* Leave this status pending for the next time we're able to + report it. In the mean time, we'll report this lwp as + dead to GDB, so GDB doesn't try to read registers and + memory from it. This can only happen if this was the + last thread of the process; otherwise, PID is removed + from the thread tables before linux_wait_for_event + returns. */ + mark_lwp_dead (lwp, wstat); + } } if (saved_inferior == NULL || linux_thread_alive (saved_tid)) @@ -1682,6 +2178,15 @@ linux_resume_one_lwp (struct lwp_info *lwp, if (lwp->stopped == 0) return; + /* Cancel actions that rely on GDB not changing the PC (e.g., the + user used the "jump" command, or "set $pc = foo"). */ + if (lwp->stop_pc != get_pc (lwp)) + { + /* Collecting 'while-stepping' actions doesn't make sense + anymore. */ + release_while_stepping_state_list (get_lwp_thread (lwp)); + } + /* If we have pending signals or status, and a new signal, enqueue the signal. Also enqueue the signal if we are waiting to reinsert a breakpoint; it will be picked up again below. 
*/ @@ -1700,8 +2205,15 @@ linux_resume_one_lwp (struct lwp_info *lwp, lwp->pending_signals = p_sig; } - if (lwp->status_pending_p && !check_removed_breakpoint (lwp)) - return; + if (lwp->status_pending_p) + { + if (debug_threads) + fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);" + " has pending status\n", + lwpid_of (lwp), step ? "step" : "continue", signal, + lwp->stop_expected ? "expected" : "not expected"); + return; + } saved_inferior = current_inferior; current_inferior = get_lwp_thread (lwp); @@ -1724,20 +2236,43 @@ linux_resume_one_lwp (struct lwp_info *lwp, if (lwp->bp_reinsert != 0) { if (debug_threads) - fprintf (stderr, " pending reinsert at %08lx", (long)lwp->bp_reinsert); - if (step == 0) - fprintf (stderr, "BAD - reinserting but not stepping.\n"); - step = 1; + fprintf (stderr, " pending reinsert at 0x%s\n", + paddress (lwp->bp_reinsert)); + + if (lwp->bp_reinsert != 0 && can_hardware_single_step ()) + { + if (step == 0) + fprintf (stderr, "BAD - reinserting but not stepping.\n"); + + step = 1; + } /* Postpone any pending signal. It was enqueued above. */ signal = 0; } - check_removed_breakpoint (lwp); + /* If we have while-stepping actions in this thread set it stepping. + If we have a signal to deliver, it may or may not be set to + SIG_IGN, we don't know. Assume so, and allow collecting + while-stepping into a signal handler. A possible smart thing to + do would be to set an internal breakpoint at the signal return + address, continue, and carry on catching this while-stepping + action only when that breakpoint is hit. A future + enhancement. */ + if (get_lwp_thread (lwp)->while_stepping != NULL + && can_hardware_single_step ()) + { + if (debug_threads) + fprintf (stderr, + "lwp %ld has a while-stepping action -> forcing step.\n", + lwpid_of (lwp)); + step = 1; + } if (debug_threads && the_low_target.get_pc != NULL) { - CORE_ADDR pc = (*the_low_target.get_pc) (); + struct regcache *regcache = get_thread_regcache (current_inferior, 1); + CORE_ADDR pc = (*the_low_target.get_pc) (regcache); fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc); } @@ -1766,8 +2301,12 @@ linux_resume_one_lwp (struct lwp_info *lwp, get_lwp_thread (lwp)); errno = 0; lwp->stopped = 0; + lwp->stopped_by_watchpoint = 0; lwp->stepping = step; - ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, signal); + ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0, + /* Coerce to a uintptr_t first to avoid potential gcc warning + of coercing an 8 byte integer to a 4 byte pointer. */ + (PTRACE_ARG4_TYPE) (uintptr_t) signal); current_inferior = saved_inferior; if (errno) @@ -1821,7 +2360,21 @@ linux_set_resume_request (struct inferior_list_entry *entry, void *arg) || (ptid_get_lwp (ptid) == -1 && (ptid_get_pid (ptid) == pid_of (lwp)))) { + if (r->resume[ndx].kind == resume_stop + && lwp->last_resume_kind == resume_stop) + { + if (debug_threads) + fprintf (stderr, "already %s LWP %ld at GDB's request\n", + thread->last_status.kind == TARGET_WAITKIND_STOPPED + ? "stopped" + : "stopping", + lwpid_of (lwp)); + + continue; + } + lwp->resume = &r->resume[ndx]; + lwp->last_resume_kind = lwp->resume->kind; return 0; } } @@ -1844,23 +2397,228 @@ resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p) if (lwp->resume == NULL) return 0; - /* If this thread has a removed breakpoint, we won't have any - events to report later, so check now. check_removed_breakpoint - may clear status_pending_p. 
We avoid calling check_removed_breakpoint - for any thread that we are not otherwise going to resume - this - lets us preserve stopped status when two threads hit a breakpoint. - GDB removes the breakpoint to single-step a particular thread - past it, then re-inserts it and resumes all threads. We want - to report the second thread without resuming it in the interim. */ if (lwp->status_pending_p) - check_removed_breakpoint (lwp); + * (int *) flag_p = 1; + + return 0; +} + +/* Return 1 if this lwp that GDB wants running is stopped at an + internal breakpoint that we need to step over. It assumes that any + required STOP_PC adjustment has already been propagated to the + inferior's regcache. */ + +static int +need_step_over_p (struct inferior_list_entry *entry, void *dummy) +{ + struct lwp_info *lwp = (struct lwp_info *) entry; + struct thread_info *saved_inferior; + CORE_ADDR pc; + + /* LWPs which will not be resumed are not interesting, because we + might not wait for them next time through linux_wait. */ + + if (!lwp->stopped) + { + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? Ignoring, not stopped\n", + lwpid_of (lwp)); + return 0; + } + + if (lwp->last_resume_kind == resume_stop) + { + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? Ignoring, should remain stopped\n", + lwpid_of (lwp)); + return 0; + } + + if (!lwp->need_step_over) + { + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? No\n", lwpid_of (lwp)); + } if (lwp->status_pending_p) - * (int *) flag_p = 1; + { + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? Ignoring, has pending status.\n", + lwpid_of (lwp)); + return 0; + } + + /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already, + or we have. */ + pc = get_pc (lwp); + + /* If the PC has changed since we stopped, then don't do anything, + and let the breakpoint/tracepoint be hit. This happens if, for + instance, GDB handled the decr_pc_after_break subtraction itself, + GDB is OOL stepping this thread, or the user has issued a "jump" + command, or poked thread's registers herself. */ + if (pc != lwp->stop_pc) + { + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? Cancelling, PC was changed. " + "Old stop_pc was 0x%s, PC is now 0x%s\n", + lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc)); + + lwp->need_step_over = 0; + return 0; + } + + saved_inferior = current_inferior; + current_inferior = get_lwp_thread (lwp); + + /* We can only step over breakpoints we know about. */ + if (breakpoint_here (pc)) + { + /* Don't step over a breakpoint that GDB expects to hit + though. */ + if (gdb_breakpoint_here (pc)) + { + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? yes, but found" + " GDB breakpoint at 0x%s; skipping step over\n", + lwpid_of (lwp), paddress (pc)); + + current_inferior = saved_inferior; + return 0; + } + else + { + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n", + lwpid_of (lwp), paddress (pc)); + + /* We've found an lwp that needs stepping over --- return 1 so + that find_inferior stops looking. */ + current_inferior = saved_inferior; + + /* If the step over is cancelled, this is set again. */ + lwp->need_step_over = 0; + return 1; + } + } + + current_inferior = saved_inferior; + + if (debug_threads) + fprintf (stderr, + "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n", + lwpid_of (lwp), paddress (pc)); return 0; } +/* Start a step-over operation on LWP. 
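   (This is reached from linux_resume, below, when need_step_over_p
   finds an LWP that GDB wants running but that is stopped at one of
   gdbserver's own internal breakpoints.)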
When LWP stopped at a + breakpoint, to make progress, we need to remove the breakpoint out + of the way. If we let other threads run while we do that, they may + pass by the breakpoint location and miss hitting it. To avoid + that, a step-over momentarily stops all threads while LWP is + single-stepped while the breakpoint is temporarily uninserted from + the inferior. When the single-step finishes, we reinsert the + breakpoint, and let all threads that are supposed to be running, + run again. + + On targets that don't support hardware single-step, we don't + currently support full software single-stepping. Instead, we only + support stepping over the thread event breakpoint, by asking the + low target where to place a reinsert breakpoint. Since this + routine assumes the breakpoint being stepped over is a thread event + breakpoint, it usually assumes the return address of the current + function is a good enough place to set the reinsert breakpoint. */ + +static int +start_step_over (struct lwp_info *lwp) +{ + struct thread_info *saved_inferior; + CORE_ADDR pc; + int step; + + if (debug_threads) + fprintf (stderr, + "Starting step-over on LWP %ld. Stopping all threads\n", + lwpid_of (lwp)); + + stop_all_lwps (); + + if (debug_threads) + fprintf (stderr, "Done stopping all threads for step-over.\n"); + + /* Note, we should always reach here with an already adjusted PC, + either by GDB (if we're resuming due to GDB's request), or by our + caller, if we just finished handling an internal breakpoint GDB + shouldn't care about. */ + pc = get_pc (lwp); + + saved_inferior = current_inferior; + current_inferior = get_lwp_thread (lwp); + + lwp->bp_reinsert = pc; + uninsert_breakpoints_at (pc); + + if (can_hardware_single_step ()) + { + step = 1; + } + else + { + CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) (); + set_reinsert_breakpoint (raddr); + step = 0; + } + + current_inferior = saved_inferior; + + linux_resume_one_lwp (lwp, step, 0, NULL); + + /* Require next event from this LWP. */ + step_over_bkpt = lwp->head.id; + return 1; +} + +/* Finish a step-over. Reinsert the breakpoint we had uninserted in + start_step_over, if still there, and delete any reinsert + breakpoints we've set, on non hardware single-step targets. */ + +static int +finish_step_over (struct lwp_info *lwp) +{ + if (lwp->bp_reinsert != 0) + { + if (debug_threads) + fprintf (stderr, "Finished step over.\n"); + + /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there + may be no breakpoint to reinsert there by now. */ + reinsert_breakpoints_at (lwp->bp_reinsert); + + lwp->bp_reinsert = 0; + + /* Delete any software-single-step reinsert breakpoints. No + longer needed. We don't have to worry about other threads + hitting this trap, and later not being able to explain it, + because we were stepping over a breakpoint, and we hold all + threads but LWP stopped while doing that. */ + if (!can_hardware_single_step ()) + delete_reinsert_breakpoints (); + + step_over_bkpt = null_ptid; + return 1; + } + else + return 0; +} + /* This function is called once per thread. We check the thread's resume request, which will tell us whether to resume, step, or leave the thread stopped; and what signal, if any, it should be sent. 
@@ -1880,7 +2638,8 @@ linux_resume_one_thread (struct inferior_list_entry *entry, void *arg) struct lwp_info *lwp; struct thread_info *thread; int step; - int pending_flag = * (int *) arg; + int leave_all_stopped = * (int *) arg; + int leave_pending; thread = (struct thread_info *) entry; lwp = get_thread_lwp (thread); @@ -1891,122 +2650,262 @@ linux_resume_one_thread (struct inferior_list_entry *entry, void *arg) if (lwp->resume->kind == resume_stop) { if (debug_threads) - fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp)); + fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp)); if (!lwp->stopped) { if (debug_threads) - fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp)); + fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp)); - lwp->suspended = 1; + /* Stop the thread, and wait for the event asynchronously, + through the event loop. */ send_sigstop (&lwp->head); } else { if (debug_threads) - { - if (lwp->suspended) - fprintf (stderr, "already stopped/suspended LWP %ld\n", - lwpid_of (lwp)); - else - fprintf (stderr, "already stopped/not suspended LWP %ld\n", - lwpid_of (lwp)); - } - - /* Make sure we leave the LWP suspended, so we don't try to - resume it without GDB telling us to. FIXME: The LWP may - have been stopped in an internal event that was not meant - to be notified back to GDB (e.g., gdbserver breakpoint), - so we should be reporting a stop event in that case - too. */ - lwp->suspended = 1; + fprintf (stderr, "already stopped LWP %ld\n", + lwpid_of (lwp)); + + /* The LWP may have been stopped in an internal event that + was not meant to be notified back to GDB (e.g., gdbserver + breakpoint), so we should be reporting a stop event in + this case too. */ + + /* If the thread already has a pending SIGSTOP, this is a + no-op. Otherwise, something later will presumably resume + the thread and this will cause it to cancel any pending + operation, due to last_resume_kind == resume_stop. If + the thread already has a pending status to report, we + will still report it the next time we wait - see + status_pending_p_callback. */ + send_sigstop (&lwp->head); } /* For stop requests, we're done. */ lwp->resume = NULL; + thread->last_status.kind = TARGET_WAITKIND_IGNORE; return 0; } - else - lwp->suspended = 0; /* If this thread which is about to be resumed has a pending status, then don't resume any threads - we can just report the pending status. Make sure to queue any signals that would otherwise be sent. In all-stop mode, we do this decision based on if *any* - thread has a pending status. */ - if (non_stop) - resume_status_pending_p (&lwp->head, &pending_flag); + thread has a pending status. If there's a thread that needs the + step-over-breakpoint dance, then don't resume any other thread + but that particular one. */ + leave_pending = (lwp->status_pending_p || leave_all_stopped); - if (!pending_flag) + if (!leave_pending) { if (debug_threads) fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp)); - if (ptid_equal (lwp->resume->thread, minus_one_ptid) - && lwp->stepping - && lwp->pending_is_breakpoint) - step = 1; - else - step = (lwp->resume->kind == resume_step); + step = (lwp->resume->kind == resume_step); + linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL); + } + else + { + if (debug_threads) + fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp)); + + /* If we have a new signal, enqueue the signal. 
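
The resume_stop path above relies on send_sigstop to stop one specific LWP. Outside gdbserver, the usual way to aim SIGSTOP at a single Linux thread rather than the whole process is the tgkill system call; a minimal standalone sketch (assuming a kernel with tgkill, i.e. Linux 2.5.75 or later) follows, purely for illustration:

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Send SIGSTOP to one specific thread TID of process PID.  Plain
   kill() targets the whole thread group; tgkill() pins the signal to a
   single thread, which is what stopping an individual LWP needs.
   Returns 0 on success, -1 on failure with errno set.  */

static int
sigstop_one_thread (pid_t pid, pid_t tid)
{
  return (int) syscall (SYS_tgkill, pid, tid, SIGSTOP);
}
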
*/ + if (lwp->resume->sig != 0) + { + struct pending_signals *p_sig; + p_sig = xmalloc (sizeof (*p_sig)); + p_sig->prev = lwp->pending_signals; + p_sig->signal = lwp->resume->sig; + memset (&p_sig->info, 0, sizeof (siginfo_t)); + + /* If this is the same signal we were previously stopped by, + make sure to queue its siginfo. We can ignore the return + value of ptrace; if it fails, we'll skip + PTRACE_SETSIGINFO. */ + if (WIFSTOPPED (lwp->last_status) + && WSTOPSIG (lwp->last_status) == lwp->resume->sig) + ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info); + + lwp->pending_signals = p_sig; + } + } + + thread->last_status.kind = TARGET_WAITKIND_IGNORE; + lwp->resume = NULL; + return 0; +} + +static void +linux_resume (struct thread_resume *resume_info, size_t n) +{ + struct thread_resume_array array = { resume_info, n }; + struct lwp_info *need_step_over = NULL; + int any_pending; + int leave_all_stopped; + + find_inferior (&all_threads, linux_set_resume_request, &array); + + /* If there is a thread which would otherwise be resumed, which has + a pending status, then don't resume any threads - we can just + report the pending status. Make sure to queue any signals that + would otherwise be sent. In non-stop mode, we'll apply this + logic to each thread individually. We consume all pending events + before considering to start a step-over (in all-stop). */ + any_pending = 0; + if (!non_stop) + find_inferior (&all_lwps, resume_status_pending_p, &any_pending); + + /* If there is a thread which would otherwise be resumed, which is + stopped at a breakpoint that needs stepping over, then don't + resume any threads - have it step over the breakpoint with all + other threads stopped, then resume all threads again. Make sure + to queue any signals that would otherwise be delivered or + queued. */ + if (!any_pending && supports_breakpoints ()) + need_step_over + = (struct lwp_info *) find_inferior (&all_lwps, + need_step_over_p, NULL); + + leave_all_stopped = (need_step_over != NULL || any_pending); + + if (debug_threads) + { + if (need_step_over != NULL) + fprintf (stderr, "Not resuming all, need step over\n"); + else if (any_pending) + fprintf (stderr, + "Not resuming, all-stop and found " + "an LWP with pending status\n"); + else + fprintf (stderr, "Resuming, no pending status or step over needed\n"); + } + + /* Even if we're leaving threads stopped, queue all signals we'd + otherwise deliver. */ + find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped); + + if (need_step_over) + start_step_over (need_step_over); +} + +/* This function is called once per thread. We check the thread's + last resume request, which will tell us whether to resume, step, or + leave the thread stopped. Any signal the client requested to be + delivered has already been enqueued at this point. + + If any thread that GDB wants running is stopped at an internal + breakpoint that needs stepping over, we start a step-over operation + on that particular thread, and leave all others stopped. 
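
The enqueue path above snapshots the signal's siginfo with PTRACE_GETSIGINFO so it can be re-delivered later with PTRACE_SETSIGINFO. Below is a standalone sketch of the capture side with the same "ignore failure" policy; the helper name is hypothetical and the sketch is not part of the patch:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <string.h>

/* Capture the siginfo of the signal that last stopped the
   ptrace-stopped thread TID, but only if it matches SIG, the signal we
   intend to re-deliver.  WSTAT is the thread's last wait status.
   Returns 1 if INFO was filled in, 0 otherwise.  */

static int
capture_siginfo (pid_t tid, int wstat, int sig, siginfo_t *info)
{
  memset (info, 0, sizeof (*info));

  if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != sig)
    return 0;

  /* As in the patch, a failure here is not fatal; the caller simply
     skips the later PTRACE_SETSIGINFO.  */
  if (ptrace (PTRACE_GETSIGINFO, tid, (void *) 0, info) != 0)
    return 0;

  return 1;
}
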
*/ + +static void +proceed_one_lwp (struct inferior_list_entry *entry) +{ + struct lwp_info *lwp; + int step; + + lwp = (struct lwp_info *) entry; + + if (debug_threads) + fprintf (stderr, + "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp)); + + if (!lwp->stopped) + { + if (debug_threads) + fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp)); + return; + } + + if (lwp->last_resume_kind == resume_stop) + { + if (debug_threads) + fprintf (stderr, " client wants LWP %ld stopped\n", lwpid_of (lwp)); + return; + } + + if (lwp->status_pending_p) + { + if (debug_threads) + fprintf (stderr, " LWP %ld has pending status, leaving stopped\n", + lwpid_of (lwp)); + return; + } + + if (lwp->suspended) + { + if (debug_threads) + fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp)); + return; + } + + step = lwp->last_resume_kind == resume_step; + linux_resume_one_lwp (lwp, step, 0, NULL); +} + +/* When we finish a step-over, set threads running again. If there's + another thread that may need a step-over, now's the time to start + it. Eventually, we'll move all threads past their breakpoints. */ - linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL); - } - else +static void +proceed_all_lwps (void) +{ + struct lwp_info *need_step_over; + + /* If there is a thread which would otherwise be resumed, which is + stopped at a breakpoint that needs stepping over, then don't + resume any threads - have it step over the breakpoint with all + other threads stopped, then resume all threads again. */ + + if (supports_breakpoints ()) { - if (debug_threads) - fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp)); + need_step_over + = (struct lwp_info *) find_inferior (&all_lwps, + need_step_over_p, NULL); - /* If we have a new signal, enqueue the signal. */ - if (lwp->resume->sig != 0) + if (need_step_over != NULL) { - struct pending_signals *p_sig; - p_sig = xmalloc (sizeof (*p_sig)); - p_sig->prev = lwp->pending_signals; - p_sig->signal = lwp->resume->sig; - memset (&p_sig->info, 0, sizeof (siginfo_t)); - - /* If this is the same signal we were previously stopped by, - make sure to queue its siginfo. We can ignore the return - value of ptrace; if it fails, we'll skip - PTRACE_SETSIGINFO. */ - if (WIFSTOPPED (lwp->last_status) - && WSTOPSIG (lwp->last_status) == lwp->resume->sig) - ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info); + if (debug_threads) + fprintf (stderr, "proceed_all_lwps: found " + "thread %ld needing a step-over\n", + lwpid_of (need_step_over)); - lwp->pending_signals = p_sig; + start_step_over (need_step_over); + return; } } - lwp->resume = NULL; - return 0; + if (debug_threads) + fprintf (stderr, "Proceeding, no step-over needed\n"); + + for_each_inferior (&all_lwps, proceed_one_lwp); } +/* Stopped LWPs that the client wanted to be running, that don't have + pending statuses, are set to run again, except for EXCEPT, if not + NULL. This undoes a stop_all_lwps call. */ + static void -linux_resume (struct thread_resume *resume_info, size_t n) +unstop_all_lwps (struct lwp_info *except) { - int pending_flag; - struct thread_resume_array array = { resume_info, n }; - - find_inferior (&all_threads, linux_set_resume_request, &array); - - /* If there is a thread which would otherwise be resumed, which - has a pending status, then don't resume any threads - we can just - report the pending status. Make sure to queue any signals - that would otherwise be sent. In non-stop mode, we'll apply this - logic to each thread individually. 
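
unstop_all_lwps above avoids writing a second traversal: it temporarily bumps EXCEPT's suspend count so that the generic proceed pass skips that one thread, then drops the count again. A toy model of that idiom, using simplified stand-in types rather than gdbserver's, purely for illustration:

#include <stddef.h>

struct toy_lwp
{
  struct toy_lwp *next;
  int suspended;
  int stopped;
};

/* Set every stopped, non-suspended LWP running again, except EXCEPT
   (if non-NULL), by hiding EXCEPT behind a suspend count for the
   duration of the walk.  */

static void
toy_unstop_all (struct toy_lwp *head, struct toy_lwp *except)
{
  struct toy_lwp *l;

  if (except != NULL)
    ++except->suspended;	/* the walk below will skip it */

  for (l = head; l != NULL; l = l->next)
    if (l->stopped && !l->suspended)
      l->stopped = 0;		/* stand-in for linux_resume_one_lwp */

  if (except != NULL)
    --except->suspended;
}
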
*/ - pending_flag = 0; - if (!non_stop) - find_inferior (&all_lwps, resume_status_pending_p, &pending_flag); - if (debug_threads) { - if (pending_flag) - fprintf (stderr, "Not resuming, pending status\n"); + if (except) + fprintf (stderr, + "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except)); else - fprintf (stderr, "Resuming, no pending status\n"); + fprintf (stderr, + "unstopping all lwps\n"); } - find_inferior (&all_threads, linux_resume_one_thread, &pending_flag); + /* Make sure proceed_one_lwp doesn't try to resume this thread. */ + if (except != NULL) + ++except->suspended; + + for_each_inferior (&all_lwps, proceed_one_lwp); + + if (except != NULL) + --except->suspended; } #ifdef HAVE_LINUX_USRREGS @@ -2026,7 +2925,7 @@ register_addr (int regnum) /* Fetch one register. */ static void -fetch_register (int regno) +fetch_register (struct regcache *regcache, int regno) { CORE_ADDR regaddr; int i, size; @@ -2050,44 +2949,37 @@ fetch_register (int regno) { errno = 0; *(PTRACE_XFER_TYPE *) (buf + i) = - ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) regaddr, 0); + ptrace (PTRACE_PEEKUSER, pid, + /* Coerce to a uintptr_t first to avoid potential gcc warning + of coercing an 8 byte integer to a 4 byte pointer. */ + (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0); regaddr += sizeof (PTRACE_XFER_TYPE); if (errno != 0) - { - /* Warning, not error, in case we are attached; sometimes the - kernel doesn't let us at the registers. */ - char *err = strerror (errno); - char *msg = alloca (strlen (err) + 128); - sprintf (msg, "reading register %d: %s", regno, err); - error (msg); - goto error_exit; - } + error ("reading register %d: %s", regno, strerror (errno)); } if (the_low_target.supply_ptrace_register) - the_low_target.supply_ptrace_register (regno, buf); + the_low_target.supply_ptrace_register (regcache, regno, buf); else - supply_register (regno, buf); - -error_exit:; + supply_register (regcache, regno, buf); } /* Fetch all registers, or just one, from the child process. */ static void -usr_fetch_inferior_registers (int regno) +usr_fetch_inferior_registers (struct regcache *regcache, int regno) { if (regno == -1) for (regno = 0; regno < the_low_target.num_regs; regno++) - fetch_register (regno); + fetch_register (regcache, regno); else - fetch_register (regno); + fetch_register (regcache, regno); } /* Store our register values back into the inferior. If REGNO is -1, do this for all registers. Otherwise, REGNO specifies which register (so we can save time). */ static void -usr_store_inferior_registers (int regno) +usr_store_inferior_registers (struct regcache *regcache, int regno) { CORE_ADDR regaddr; int i, size; @@ -2112,16 +3004,19 @@ usr_store_inferior_registers (int regno) memset (buf, 0, size); if (the_low_target.collect_ptrace_register) - the_low_target.collect_ptrace_register (regno, buf); + the_low_target.collect_ptrace_register (regcache, regno, buf); else - collect_register (regno, buf); + collect_register (regcache, regno, buf); pid = lwpid_of (get_thread_lwp (current_inferior)); for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE)) { errno = 0; - ptrace (PTRACE_POKEUSER, pid, (PTRACE_ARG3_TYPE) regaddr, - *(PTRACE_XFER_TYPE *) (buf + i)); + ptrace (PTRACE_POKEUSER, pid, + /* Coerce to a uintptr_t first to avoid potential gcc warning + about coercing an 8 byte integer to a 4 byte pointer. 
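
fetch_register above transfers one register word at a time with PTRACE_PEEKUSER, casting the user-area offset through uintptr_t before the pointer cast so that 32-bit builds stay warning-free. A standalone sketch of the same loop, assuming SIZE is a multiple of sizeof (long) as gdbserver arranges by rounding register sizes up; not part of the patch:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Read SIZE bytes of register data starting at user-area offset
   REGADDR of the stopped thread TID, one long at a time, into BUF.
   Returns 0 on success, or the errno of the failing transfer.  */

static int
peek_user_area (pid_t tid, uintptr_t regaddr, void *buf, size_t size)
{
  size_t i;

  for (i = 0; i < size; i += sizeof (long))
    {
      long word;

      errno = 0;
      /* REGADDR is already a uintptr_t, so the pointer cast cannot
	 truncate; this mirrors the patch's (uintptr_t) coercion.  */
      word = ptrace (PTRACE_PEEKUSER, tid, (void *) regaddr, (void *) 0);
      if (errno != 0)
	return errno;

      memcpy ((char *) buf + i, &word, sizeof (long));
      regaddr += sizeof (long);
    }

  return 0;
}
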
*/ + (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, + (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i)); if (errno != 0) { /* At this point, ESRCH should mean the process is @@ -2132,21 +3027,14 @@ usr_store_inferior_registers (int regno) return; if ((*the_low_target.cannot_store_register) (regno) == 0) - { - char *err = strerror (errno); - char *msg = alloca (strlen (err) + 128); - sprintf (msg, "writing register %d: %s", - regno, err); - error (msg); - return; - } + error ("writing register %d: %s", regno, strerror (errno)); } regaddr += sizeof (PTRACE_XFER_TYPE); } } else for (regno = 0; regno < the_low_target.num_regs; regno++) - usr_store_inferior_registers (regno); + usr_store_inferior_registers (regcache, regno); } #endif /* HAVE_LINUX_USRREGS */ @@ -2155,19 +3043,20 @@ usr_store_inferior_registers (int regno) #ifdef HAVE_LINUX_REGSETS static int -regsets_fetch_inferior_registers () +regsets_fetch_inferior_registers (struct regcache *regcache) { struct regset_info *regset; int saw_general_regs = 0; int pid; + struct iovec iov; regset = target_regsets; pid = lwpid_of (get_thread_lwp (current_inferior)); while (regset->size >= 0) { - void *buf; - int res; + void *buf, *data; + int nt_type, res; if (regset->size == 0 || disabled_regsets[regset - target_regsets]) { @@ -2176,10 +3065,21 @@ regsets_fetch_inferior_registers () } buf = xmalloc (regset->size); + + nt_type = regset->nt_type; + if (nt_type) + { + iov.iov_base = buf; + iov.iov_len = regset->size; + data = (void *) &iov; + } + else + data = buf; + #ifndef __sparc__ - res = ptrace (regset->get_request, pid, 0, buf); + res = ptrace (regset->get_request, pid, nt_type, data); #else - res = ptrace (regset->get_request, pid, buf, 0); + res = ptrace (regset->get_request, pid, data, nt_type); #endif if (res < 0) { @@ -2201,7 +3101,7 @@ regsets_fetch_inferior_registers () } else if (regset->type == GENERAL_REGS) saw_general_regs = 1; - regset->store_function (buf); + regset->store_function (regcache, buf); regset ++; free (buf); } @@ -2212,19 +3112,20 @@ regsets_fetch_inferior_registers () } static int -regsets_store_inferior_registers () +regsets_store_inferior_registers (struct regcache *regcache) { struct regset_info *regset; int saw_general_regs = 0; int pid; + struct iovec iov; regset = target_regsets; pid = lwpid_of (get_thread_lwp (current_inferior)); while (regset->size >= 0) { - void *buf; - int res; + void *buf, *data; + int nt_type, res; if (regset->size == 0 || disabled_regsets[regset - target_regsets]) { @@ -2237,22 +3138,33 @@ regsets_store_inferior_registers () /* First fill the buffer with the current register set contents, in case there are any items in the kernel's regset that are not in gdbserver's regcache. */ + + nt_type = regset->nt_type; + if (nt_type) + { + iov.iov_base = buf; + iov.iov_len = regset->size; + data = (void *) &iov; + } + else + data = buf; + #ifndef __sparc__ - res = ptrace (regset->get_request, pid, 0, buf); + res = ptrace (regset->get_request, pid, nt_type, data); #else - res = ptrace (regset->get_request, pid, buf, 0); + res = ptrace (regset->get_request, pid, &iov, data); #endif if (res == 0) { /* Then overlay our cached registers on that. */ - regset->fill_function (buf); + regset->fill_function (regcache, buf); /* Only now do we write the register set. 
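
The nt_type/iovec handling above corresponds to the PTRACE_GETREGSET interface, where the "addr" slot carries an ELF note type and the "data" slot a struct iovec, while nt_type == 0 keeps the older fixed-layout requests. Here is a standalone sketch of that split for the general-purpose registers, assuming x86 with glibc and, for PTRACE_GETREGSET, a kernel of 2.6.34 or later; not part of the patch:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>
#include <errno.h>
#include <stdint.h>

/* Fetch the general-purpose registers of the stopped thread TID.
   Prefer PTRACE_GETREGSET with an iovec (the nt_type path above);
   fall back to the legacy fixed-layout PTRACE_GETREGS.  Returns 0 on
   success, -1 on failure.  */

static int
fetch_gregs (pid_t tid, struct user_regs_struct *regs)
{
#ifdef PTRACE_GETREGSET
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  if (ptrace (PTRACE_GETREGSET, tid,
	      (void *) (uintptr_t) NT_PRSTATUS, &iov) == 0)
    return 0;
  if (errno != EIO && errno != EINVAL)
    return -1;
  /* Request not supported here; fall through to the legacy one.  */
#endif
  return (int) ptrace (PTRACE_GETREGS, tid, (void *) 0, regs);
}
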
*/ #ifndef __sparc__ - res = ptrace (regset->set_request, pid, 0, buf); + res = ptrace (regset->set_request, pid, nt_type, data); #else - res = ptrace (regset->set_request, pid, buf, 0); + res = ptrace (regset->set_request, pid, data, nt_type); #endif } @@ -2296,26 +3208,26 @@ regsets_store_inferior_registers () void -linux_fetch_registers (int regno) +linux_fetch_registers (struct regcache *regcache, int regno) { #ifdef HAVE_LINUX_REGSETS - if (regsets_fetch_inferior_registers () == 0) + if (regsets_fetch_inferior_registers (regcache) == 0) return; #endif #ifdef HAVE_LINUX_USRREGS - usr_fetch_inferior_registers (regno); + usr_fetch_inferior_registers (regcache, regno); #endif } void -linux_store_registers (int regno) +linux_store_registers (struct regcache *regcache, int regno) { #ifdef HAVE_LINUX_REGSETS - if (regsets_store_inferior_registers () == 0) + if (regsets_store_inferior_registers (regcache) == 0) return; #endif #ifdef HAVE_LINUX_USRREGS - usr_store_inferior_registers (regno); + usr_store_inferior_registers (regcache, regno); #endif } @@ -2357,7 +3269,7 @@ linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len) #ifdef HAVE_PREAD64 if (pread64 (fd, myaddr, len, memaddr) != len) #else - if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, memaddr, len) != len) + if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len) #endif { close (fd); @@ -2373,7 +3285,10 @@ linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len) for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) { errno = 0; - buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0); + /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning + about coercing an 8 byte integer to a 4 byte pointer. */ + buffer[i] = ptrace (PTRACE_PEEKTEXT, pid, + (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0); if (errno) return errno; } @@ -2386,9 +3301,8 @@ linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len) return 0; } -/* Copy LEN bytes of data from debugger memory at MYADDR - to inferior's memory at MEMADDR. - On failure (cannot write the inferior) +/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's + memory at MEMADDR. On failure (cannot write to the inferior) returns the value of errno. */ static int @@ -2420,18 +3334,29 @@ linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len) /* Fill start and end extra bytes of buffer with existing memory data. */ - buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) addr, 0); + errno = 0; + /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning + about coercing an 8 byte integer to a 4 byte pointer. */ + buffer[0] = ptrace (PTRACE_PEEKTEXT, pid, + (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0); + if (errno) + return errno; if (count > 1) { + errno = 0; buffer[count - 1] = ptrace (PTRACE_PEEKTEXT, pid, - (PTRACE_ARG3_TYPE) (addr + (count - 1) - * sizeof (PTRACE_XFER_TYPE)), + /* Coerce to a uintptr_t first to avoid potential gcc warning + about coercing an 8 byte integer to a 4 byte pointer. */ + (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1) + * sizeof (PTRACE_XFER_TYPE)), 0); + if (errno) + return errno; } - /* Copy data to be written over corresponding part of buffer */ + /* Copy data to be written over corresponding part of buffer. 
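
linux_write_memory above can only move whole, word-aligned PTRACE_XFER_TYPE units, so it peeks the first and last words, overlays the new bytes on them, and pokes everything back. A standalone sketch of the same read-modify-write scheme follows; the helper name is hypothetical and the gdbserver bookkeeping is omitted:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Write LEN bytes from MYADDR into the stopped tracee TID at MEMADDR,
   honouring ptrace's whole-word, word-aligned transfer granularity.
   Returns 0 on success, or the errno of the failing transfer.  */

static int
poke_bytes (pid_t tid, uintptr_t memaddr, const void *myaddr, size_t len)
{
  const size_t w = sizeof (long);
  uintptr_t addr = memaddr & ~(uintptr_t) (w - 1);	  /* round down */
  size_t count = (((memaddr + len) - addr) + w - 1) / w;  /* whole words */
  size_t i;

  if (len == 0)
    return 0;

  {
    long buf[count];		/* C99 VLA; fine for a sketch */

    errno = 0;
    buf[0] = ptrace (PTRACE_PEEKTEXT, tid, (void *) addr, (void *) 0);
    if (errno != 0)
      return errno;

    if (count > 1)
      {
	errno = 0;
	buf[count - 1]
	  = ptrace (PTRACE_PEEKTEXT, tid,
		    (void *) (addr + (count - 1) * w), (void *) 0);
	if (errno != 0)
	  return errno;
      }

    /* Overlay the new bytes on the words we just read back.  */
    memcpy ((char *) buf + (memaddr - addr), myaddr, len);

    for (i = 0; i < count; i++)
      {
	errno = 0;
	ptrace (PTRACE_POKETEXT, tid, (void *) (addr + i * w),
		(void *) (uintptr_t) buf[i]);
	if (errno != 0)
	  return errno;
      }
  }

  return 0;
}
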
*/ memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len); @@ -2440,7 +3365,11 @@ linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len) for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE)) { errno = 0; - ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) addr, buffer[i]); + ptrace (PTRACE_POKETEXT, pid, + /* Coerce to a uintptr_t first to avoid potential gcc warning + about coercing an 8 byte integer to a 4 byte pointer. */ + (PTRACE_ARG3_TYPE) (uintptr_t) addr, + (PTRACE_ARG4_TYPE) buffer[i]); if (errno) return errno; } @@ -2448,6 +3377,7 @@ linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len) return 0; } +/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */ static int linux_supports_tracefork_flag; /* Helper functions for linux_test_for_tracefork, called via clone (). */ @@ -2465,6 +3395,14 @@ linux_tracefork_child (void *arg) { ptrace (PTRACE_TRACEME, 0, 0, 0); kill (getpid (), SIGSTOP); + +#if !(defined(__UCLIBC__) && defined(HAS_NOMMU)) + + if (fork () == 0) + linux_tracefork_grandchild (NULL); + +#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */ + #ifdef __ia64__ __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE, CLONE_VM | SIGCHLD, NULL); @@ -2472,82 +3410,10 @@ linux_tracefork_child (void *arg) clone (linux_tracefork_grandchild, arg + STACK_SIZE, CLONE_VM | SIGCHLD, NULL); #endif - _exit (0); -} - -/* Wrapper function for waitpid which handles EINTR, and emulates - __WALL for systems where that is not available. */ - -static int -my_waitpid (int pid, int *status, int flags) -{ - int ret, out_errno; - - if (debug_threads) - fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags); - - if (flags & __WALL) - { - sigset_t block_mask, org_mask, wake_mask; - int wnohang; - - wnohang = (flags & WNOHANG) != 0; - flags &= ~(__WALL | __WCLONE); - flags |= WNOHANG; - - /* Block all signals while here. This avoids knowing about - LinuxThread's signals. */ - sigfillset (&block_mask); - sigprocmask (SIG_BLOCK, &block_mask, &org_mask); - - /* ... except during the sigsuspend below. */ - sigemptyset (&wake_mask); - - while (1) - { - /* Since all signals are blocked, there's no need to check - for EINTR here. */ - ret = waitpid (pid, status, flags); - out_errno = errno; - - if (ret == -1 && out_errno != ECHILD) - break; - else if (ret > 0) - break; - - if (flags & __WCLONE) - { - /* We've tried both flavors now. If WNOHANG is set, - there's nothing else to do, just bail out. */ - if (wnohang) - break; - - if (debug_threads) - fprintf (stderr, "blocking\n"); - - /* Block waiting for signals. */ - sigsuspend (&wake_mask); - } - - flags ^= __WCLONE; - } - - sigprocmask (SIG_SETMASK, &org_mask, NULL); - } - else - { - do - ret = waitpid (pid, status, flags); - while (ret == -1 && errno == EINTR); - out_errno = errno; - } - if (debug_threads) - fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n", - pid, flags, status ? *status : -1, ret); +#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */ - errno = out_errno; - return ret; + _exit (0); } /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. 
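
linux_test_for_tracefork, which follows, decides whether PTRACE_O_TRACEFORK can be trusted by actually provoking a fork event in a traced child. The standalone probe below is much cruder: it only checks whether the option is accepted, which the real test deliberately does not settle for, and it uses plain fork(), which MMU-less (uClinux) targets cannot, as the patch's clone() path shows. It is an illustrative sketch only:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdint.h>
#include <unistd.h>

/* Crude probe: does this kernel accept PTRACE_O_TRACEFORK?
   Returns 1 if the option was accepted, 0 otherwise.  */

static int
probe_tracefork (void)
{
  pid_t child = fork ();
  int status, accepted = 0;

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, (void *) 0, (void *) 0);
      raise (SIGSTOP);
      _exit (0);
    }
  if (child == -1)
    return 0;

  if (waitpid (child, &status, 0) == child && WIFSTOPPED (status))
    accepted = (ptrace (PTRACE_SETOPTIONS, child, (void *) 0,
			(void *) (uintptr_t) PTRACE_O_TRACEFORK) == 0);

  /* Let the child run to its _exit and reap it.  */
  ptrace (PTRACE_CONT, child, (void *) 0, (void *) 0);
  waitpid (child, &status, 0);

  return accepted;
}
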
Make @@ -2559,18 +3425,31 @@ linux_test_for_tracefork (void) { int child_pid, ret, status; long second_pid; +#if defined(__UCLIBC__) && defined(HAS_NOMMU) char *stack = xmalloc (STACK_SIZE * 4); +#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */ linux_supports_tracefork_flag = 0; +#if !(defined(__UCLIBC__) && defined(HAS_NOMMU)) + + child_pid = fork (); + if (child_pid == 0) + linux_tracefork_child (NULL); + +#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */ + /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */ #ifdef __ia64__ child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE, CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2); -#else +#else /* !__ia64__ */ child_pid = clone (linux_tracefork_child, stack + STACK_SIZE, CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2); -#endif +#endif /* !__ia64__ */ + +#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */ + if (child_pid == -1) perror_with_name ("clone"); @@ -2582,7 +3461,8 @@ linux_test_for_tracefork (void) if (! WIFSTOPPED (status)) error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status); - ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK); + ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, + (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK); if (ret != 0) { ret = ptrace (PTRACE_KILL, child_pid, 0, 0); @@ -2638,7 +3518,9 @@ linux_test_for_tracefork (void) } while (WIFSTOPPED (status)); +#if defined(__UCLIBC__) && defined(HAS_NOMMU) free (stack); +#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */ } @@ -2651,6 +3533,9 @@ linux_look_up_symbols (void) if (proc->private->thread_db != NULL) return; + /* If the kernel supports tracing forks then it also supports tracing + clones, and then we don't need to use the magic thread event breakpoint + to learn about threads. */ thread_db_init (!linux_supports_tracefork_flag); #endif } @@ -2728,19 +3613,17 @@ linux_remove_point (char type, CORE_ADDR addr, int len) static int linux_stopped_by_watchpoint (void) { - if (the_low_target.stopped_by_watchpoint != NULL) - return the_low_target.stopped_by_watchpoint (); - else - return 0; + struct lwp_info *lwp = get_thread_lwp (current_inferior); + + return lwp->stopped_by_watchpoint; } static CORE_ADDR linux_stopped_data_address (void) { - if (the_low_target.stopped_data_address != NULL) - return the_low_target.stopped_data_address (); - else - return 0; + struct lwp_info *lwp = get_thread_lwp (current_inferior); + + return lwp->stopped_data_address; } #if defined(__UCLIBC__) && defined(HAS_NOMMU) @@ -2788,6 +3671,175 @@ linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p) } #endif +static int +compare_ints (const void *xa, const void *xb) +{ + int a = *(const int *)xa; + int b = *(const int *)xb; + + return a - b; +} + +static int * +unique (int *b, int *e) +{ + int *d = b; + while (++b != e) + if (*d != *b) + *++d = *b; + return ++d; +} + +/* Given PID, iterates over all threads in that process. + + Information about each thread, in a format suitable for qXfer:osdata:thread + is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already + initialized, and the caller is responsible for finishing and appending '\0' + to it. + + The list of cores that threads are running on is assigned to *CORES, if it + is not NULL. If no cores are found, *CORES will be set to NULL. Caller + should free *CORES. 
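
list_threads, documented above, gets its thread list by scanning /proc/PID/task. A standalone sketch of that scan, printing instead of building XML and using a hypothetical helper name:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* Print the thread (LWP) ids of process PID by listing
   /proc/PID/task.  Returns the number of threads seen, or -1 if the
   directory could not be opened.  */

static int
print_threads_of (int pid)
{
  char path[64];
  DIR *dir;
  struct dirent *dp;
  int count = 0;

  snprintf (path, sizeof (path), "/proc/%d/task", pid);
  dir = opendir (path);
  if (dir == NULL)
    return -1;

  while ((dp = readdir (dir)) != NULL)
    {
      unsigned long lwp = strtoul (dp->d_name, NULL, 10);

      if (lwp != 0)		/* skips "." and ".."  */
	{
	  printf ("LWP %lu\n", lwp);
	  count++;
	}
    }

  closedir (dir);
  return count;
}
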
*/ + +static void +list_threads (int pid, struct buffer *buffer, char **cores) +{ + int count = 0; + int allocated = 10; + int *core_numbers = xmalloc (sizeof (int) * allocated); + char pathname[128]; + DIR *dir; + struct dirent *dp; + struct stat statbuf; + + sprintf (pathname, "/proc/%d/task", pid); + if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode)) + { + dir = opendir (pathname); + if (!dir) + { + free (core_numbers); + return; + } + + while ((dp = readdir (dir)) != NULL) + { + unsigned long lwp = strtoul (dp->d_name, NULL, 10); + + if (lwp != 0) + { + unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0)); + + if (core != -1) + { + char s[sizeof ("4294967295")]; + sprintf (s, "%u", core); + + if (count == allocated) + { + allocated *= 2; + core_numbers = realloc (core_numbers, + sizeof (int) * allocated); + } + core_numbers[count++] = core; + if (buffer) + buffer_xml_printf (buffer, + "" + "%d" + "%s" + "%s" + "", pid, dp->d_name, s); + } + else + { + if (buffer) + buffer_xml_printf (buffer, + "" + "%d" + "%s" + "", pid, dp->d_name); + } + } + } + } + + if (cores) + { + *cores = NULL; + if (count > 0) + { + struct buffer buffer2; + int *b; + int *e; + qsort (core_numbers, count, sizeof (int), compare_ints); + + /* Remove duplicates. */ + b = core_numbers; + e = unique (b, core_numbers + count); + + buffer_init (&buffer2); + + for (b = core_numbers; b != e; ++b) + { + char number[sizeof ("4294967295")]; + sprintf (number, "%u", *b); + buffer_xml_printf (&buffer2, "%s%s", + (b == core_numbers) ? "" : ",", number); + } + buffer_grow_str0 (&buffer2, ""); + + *cores = buffer_finish (&buffer2); + } + } + free (core_numbers); +} + +static void +show_process (int pid, const char *username, struct buffer *buffer) +{ + char pathname[128]; + FILE *f; + char cmd[MAXPATHLEN + 1]; + + sprintf (pathname, "/proc/%d/cmdline", pid); + + if ((f = fopen (pathname, "r")) != NULL) + { + size_t len = fread (cmd, 1, sizeof (cmd) - 1, f); + if (len > 0) + { + char *cores = 0; + int i; + for (i = 0; i < len; i++) + if (cmd[i] == '\0') + cmd[i] = ' '; + cmd[len] = '\0'; + + buffer_xml_printf (buffer, + "" + "%d" + "%s" + "%s", + pid, + username, + cmd); + + /* This only collects core numbers, and does not print threads. 
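
show_process above reads /proc/PID/cmdline and rewrites the NUL separators between argv words as spaces before emitting XML. The same read as a standalone helper, with a hypothetical name and no XML output:

#include <stdio.h>
#include <stddef.h>

/* Read the command line of PID into CMD (of size CMDLEN), turning the
   NUL separators between argv words into spaces.  Returns 0 on
   success, -1 if the file could not be read or was empty.  */

static int
read_cmdline (int pid, char *cmd, size_t cmdlen)
{
  char path[64];
  FILE *f;
  size_t len, i;

  snprintf (path, sizeof (path), "/proc/%d/cmdline", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;

  len = fread (cmd, 1, cmdlen - 1, f);
  fclose (f);
  if (len == 0)
    return -1;

  for (i = 0; i < len; i++)
    if (cmd[i] == '\0')
      cmd[i] = ' ';
  cmd[len] = '\0';

  return 0;
}
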
*/ + list_threads (pid, NULL, &cores); + + if (cores) + { + buffer_xml_printf (buffer, + "%s", cores); + free (cores); + } + + buffer_xml_printf (buffer, ""); + } + fclose (f); + } +} + static int linux_qxfer_osdata (const char *annex, unsigned char *readbuf, unsigned const char *writebuf, @@ -2798,10 +3850,16 @@ linux_qxfer_osdata (const char *annex, static const char *buf; static long len_avail = -1; static struct buffer buffer; + int processes = 0; + int threads = 0; DIR *dirp; - if (strcmp (annex, "processes") != 0) + if (strcmp (annex, "processes") == 0) + processes = 1; + else if (strcmp (annex, "threads") == 0) + threads = 1; + else return 0; if (!readbuf || writebuf) @@ -2814,7 +3872,10 @@ linux_qxfer_osdata (const char *annex, len_avail = 0; buf = NULL; buffer_init (&buffer); - buffer_grow_str (&buffer, ""); + if (processes) + buffer_grow_str (&buffer, ""); + else if (threads) + buffer_grow_str (&buffer, ""); dirp = opendir ("/proc"); if (dirp) @@ -2833,37 +3894,16 @@ linux_qxfer_osdata (const char *annex, if (stat (procentry, &statbuf) == 0 && S_ISDIR (statbuf.st_mode)) { - char pathname[128]; - FILE *f; - char cmd[MAXPATHLEN + 1]; - struct passwd *entry; - - sprintf (pathname, "/proc/%s/cmdline", dp->d_name); - entry = getpwuid (statbuf.st_uid); + int pid = (int) strtoul (dp->d_name, NULL, 10); - if ((f = fopen (pathname, "r")) != NULL) + if (processes) { - size_t len = fread (cmd, 1, sizeof (cmd) - 1, f); - if (len > 0) - { - int i; - for (i = 0; i < len; i++) - if (cmd[i] == '\0') - cmd[i] = ' '; - cmd[len] = '\0'; - - buffer_xml_printf ( - &buffer, - "" - "%s" - "%s" - "%s" - "", - dp->d_name, - entry ? entry->pw_name : "?", - cmd); - } - fclose (f); + struct passwd *entry = getpwuid (statbuf.st_uid); + show_process (pid, entry ? entry->pw_name : "?", &buffer); + } + else if (threads) + { + list_threads (pid, &buffer, NULL); } } } @@ -3139,6 +4179,88 @@ linux_qxfer_spu (const char *annex, unsigned char *readbuf, return ret; } +static int +linux_core_of_thread (ptid_t ptid) +{ + char filename[sizeof ("/proc//task//stat") + + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */ + + 1]; + FILE *f; + char *content = NULL; + char *p; + char *ts = 0; + int content_read = 0; + int i; + int core; + + sprintf (filename, "/proc/%d/task/%ld/stat", + ptid_get_pid (ptid), ptid_get_lwp (ptid)); + f = fopen (filename, "r"); + if (!f) + return -1; + + for (;;) + { + int n; + content = realloc (content, content_read + 1024); + n = fread (content + content_read, 1, 1024, f); + content_read += n; + if (n < 1024) + { + content[content_read] = '\0'; + break; + } + } + + p = strchr (content, '('); + p = strchr (p, ')') + 2; /* skip ")" and a whitespace. 
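
linux_core_of_thread above pulls the "processor" field, field 39 of /proc/PID/task/TID/stat, out of the stat line after skipping past the parenthesised command name. Here is a standalone sketch of that parse; unlike the code above it rescans from the last ')', so a command name containing parentheses cannot shift the fields. Not part of the patch:

#include <stdio.h>
#include <string.h>

/* Return the CPU that thread TID of process PID last ran on, by
   reading field 39 ("processor") of /proc/PID/task/TID/stat, or -1 on
   error.  */

static int
core_of_thread (int pid, long tid)
{
  char path[128], line[1024];
  FILE *f;
  char *p, *save = NULL;
  int i, core = -1;

  snprintf (path, sizeof (path), "/proc/%d/task/%ld/stat", pid, tid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  if (fgets (line, sizeof (line), f) == NULL)
    {
      fclose (f);
      return -1;
    }
  fclose (f);

  p = strrchr (line, ')');
  if (p == NULL)
    return -1;
  p += 2;			/* skip ")" and the following space */

  /* The first token after the command is field 3; field 39, the
     processor number, is 36 tokens further on.  */
  p = strtok_r (p, " ", &save);
  for (i = 0; p != NULL && i < 36; i++)
    p = strtok_r (NULL, " ", &save);

  if (p == NULL || sscanf (p, "%d", &core) != 1)
    return -1;

  return core;
}
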
*/ + + p = strtok_r (p, " ", &ts); + for (i = 0; i != 36; ++i) + p = strtok_r (NULL, " ", &ts); + + if (sscanf (p, "%d", &core) == 0) + core = -1; + + free (content); + fclose (f); + + return core; +} + +static void +linux_process_qsupported (const char *query) +{ + if (the_low_target.process_qsupported != NULL) + the_low_target.process_qsupported (query); +} + +static int +linux_supports_tracepoints (void) +{ + if (*the_low_target.supports_tracepoints == NULL) + return 0; + + return (*the_low_target.supports_tracepoints) (); +} + +static CORE_ADDR +linux_read_pc (struct regcache *regcache) +{ + if (the_low_target.get_pc == NULL) + return 0; + + return (*the_low_target.get_pc) (regcache); +} + +static void +linux_write_pc (struct regcache *regcache, CORE_ADDR pc) +{ + gdb_assert (the_low_target.set_pc != NULL); + + (*the_low_target.set_pc) (regcache, pc); +} + static struct target_ops linux_target_ops = { linux_create_inferior, linux_attach, @@ -3178,10 +4300,15 @@ static struct target_ops linux_target_ops = { linux_start_non_stop, linux_supports_multi_process, #ifdef USE_THREAD_DB - thread_db_handle_monitor_command + thread_db_handle_monitor_command, #else - NULL + NULL, #endif + linux_core_of_thread, + linux_process_qsupported, + linux_supports_tracepoints, + linux_read_pc, + linux_write_pc }; static void @@ -3189,7 +4316,9 @@ linux_init_signals () { /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads to find what the cancel signal actually is. */ +#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */ signal (__SIGRTMIN+1, SIG_IGN); +#endif } void
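
Several of the new target hooks above (linux_supports_tracepoints, linux_read_pc, linux_process_qsupported) follow the same convention: a NULL entry in the low-target vector simply means "not supported", and the generic code checks before calling. A toy model of that optional-hook dispatch, with simplified stand-in types rather than gdbserver's:

#include <stddef.h>

struct toy_low_target
{
  int (*supports_tracepoints) (void);
  unsigned long (*get_pc) (void);
};

/* Report tracepoint support; a missing hook means "no".  */

static int
toy_supports_tracepoints (const struct toy_low_target *t)
{
  if (t->supports_tracepoints == NULL)
    return 0;
  return (*t->supports_tracepoints) ();
}

/* Read the PC through the hook; a missing hook yields 0, matching the
   "no GET_PC implementation" case.  */

static unsigned long
toy_read_pc (const struct toy_low_target *t)
{
  if (t->get_pc == NULL)
    return 0;
  return (*t->get_pc) ();
}
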