X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gdb%2Finfrun.c;h=a500e97948af077d9c1a7487de7996cc1eb68520;hb=ad98fdaf510e66376482f21152e0a57d0bd47071;hp=a945c1a40fe945e8c74a05c1cfb236b98585774d;hpb=27e232885db363fb545fd2f450e72d929e59b8f6;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/infrun.c b/gdb/infrun.c index a945c1a40f..a500e97948 100644 --- a/gdb/infrun.c +++ b/gdb/infrun.c @@ -1,5 +1,6 @@ /* Target-struct-independent code to start (run) and stop an inferior process. - Copyright 1986-1989, 1991-2000 Free Software Foundation, Inc. + Copyright 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, + 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of GDB. @@ -31,10 +32,11 @@ #include "target.h" #include "gdbthread.h" #include "annotate.h" -#include "symfile.h" /* for overlay functions */ +#include "symfile.h" #include "top.h" #include #include "inf-loop.h" +#include "regcache.h" /* Prototypes for local functions */ @@ -84,6 +86,11 @@ void _initialize_infrun (void); int inferior_ignoring_startup_exec_events = 0; int inferior_ignoring_leading_exec_events = 0; +/* When set, stop the 'step' command if we enter a function which has + no line number information. The normal behavior is that we step + over such function. */ +int step_stop_if_no_debug = 0; + /* In asynchronous mode, but simulating synchronous execution. */ int sync_execution = 0; @@ -92,7 +99,7 @@ int sync_execution = 0; when the inferior stopped in a different thread than it had been running in. */ -static int previous_inferior_pid; +static ptid_t previous_inferior_ptid; /* This is true for configurations that may follow through execl() and similar functions. At present this is only true for HP-UX native. */ @@ -103,31 +110,6 @@ static int previous_inferior_pid; static int may_follow_exec = MAY_FOLLOW_EXEC; -/* resume and wait_for_inferior use this to ensure that when - stepping over a hit breakpoint in a threaded application - only the thread that hit the breakpoint is stepped and the - other threads don't continue. This prevents having another - thread run past the breakpoint while it is temporarily - removed. - - This is not thread-specific, so it isn't saved as part of - the infrun state. - - Versions of gdb which don't use the "step == this thread steps - and others continue" model but instead use the "step == this - thread steps and others wait" shouldn't do this. */ - -static int thread_step_needed = 0; - -/* This is true if thread_step_needed should actually be used. At - present this is only true for HP-UX native. */ - -#ifndef USE_THREAD_STEP_NEEDED -#define USE_THREAD_STEP_NEEDED (0) -#endif - -static int use_thread_step_needed = USE_THREAD_STEP_NEEDED; - /* GET_LONGJMP_TARGET returns the PC at which longjmp() will resume the program. It needs to examine the jmp_buf argument and extract the PC from it. The return value is non-zero on success, zero otherwise. */ @@ -325,6 +307,9 @@ static unsigned char *signal_program; (flags)[signum] = 0; \ } while (0) +/* Value to pass to target_resume() to cause all threads to resume */ + +#define RESUME_ALL (pid_to_ptid (-1)) /* Command list pointer for the "stop" placeholder. */ @@ -399,6 +384,12 @@ static struct breakpoint *through_sigtramp_breakpoint = NULL; currently be running in a syscall. */ static int number_of_threads_in_syscalls; +/* This is a cached copy of the pid/waitstatus of the last event + returned by target_wait()/target_wait_hook(). This information is + returned by get_last_target_status(). 
*/ +static ptid_t target_last_wait_ptid; +static struct target_waitstatus target_last_waitstatus; + /* This is used to remember when a fork, vfork or exec event was caught by a catchpoint, and thus the event is to be followed at the next resume of the inferior, and not @@ -428,7 +419,7 @@ pending_follow; set to 1, a vfork event has been seen, but cannot be followed until the exec is seen. - (In the latter case, inferior_pid is still the parent of the + (In the latter case, inferior_ptid is still the parent of the vfork, and pending_follow.fork_event.child_pid is the child. The appropriate process is followed, according to the setting of follow-fork-mode.) */ @@ -468,7 +459,8 @@ follow_inferior_fork (int parent_pid, int child_pid, int has_forked, /* Or, did the user not know, and want us to ask? */ if (follow_fork_mode_string == follow_fork_mode_ask) { - internal_error ("follow_inferior_fork: \"ask\" mode not implemented"); + internal_error (__FILE__, __LINE__, + "follow_inferior_fork: \"ask\" mode not implemented"); /* follow_mode = follow_fork_mode_...; */ } @@ -499,7 +491,7 @@ follow_inferior_fork (int parent_pid, int child_pid, int has_forked, } /* If we're to be following the child, then attach to it, detach - from inferior_pid, and set inferior_pid to child_pid. */ + from inferior_ptid, and set inferior_ptid to child_pid. */ else if (follow_mode == follow_fork_mode_child) { char child_pid_spelling[100]; /* Arbitrary length. */ @@ -522,7 +514,7 @@ follow_inferior_fork (int parent_pid, int child_pid, int has_forked, /* Also reset the solib inferior hook from the parent. */ #ifdef SOLIB_REMOVE_INFERIOR_HOOK - SOLIB_REMOVE_INFERIOR_HOOK (inferior_pid); + SOLIB_REMOVE_INFERIOR_HOOK (PIDGET (inferior_ptid)); #endif /* Detach from the parent. */ @@ -530,7 +522,7 @@ follow_inferior_fork (int parent_pid, int child_pid, int has_forked, target_detach (NULL, 1); /* Attach to the child. */ - inferior_pid = child_pid; + inferior_ptid = pid_to_ptid (child_pid); sprintf (child_pid_spelling, "%d", child_pid); dont_repeat (); @@ -574,7 +566,7 @@ follow_inferior_fork (int parent_pid, int child_pid, int has_forked, /* We continue to follow the parent. To help distinguish the two debuggers, though, both we and our clone will reset our prompts. */ - sprintf (pid_suffix, "[%d] ", inferior_pid); + sprintf (pid_suffix, "[%d] ", PIDGET (inferior_ptid)); set_prompt (strcat (get_prompt (), pid_suffix)); } @@ -636,15 +628,18 @@ follow_vfork (int parent_pid, int child_pid) follow_inferior_fork (parent_pid, child_pid, 0, 1); /* Did we follow the child? Had it exec'd before we saw the parent vfork? */ - if (pending_follow.fork_event.saw_child_exec && (inferior_pid == child_pid)) + if (pending_follow.fork_event.saw_child_exec + && (PIDGET (inferior_ptid) == child_pid)) { pending_follow.fork_event.saw_child_exec = 0; pending_follow.kind = TARGET_WAITKIND_SPURIOUS; - follow_exec (inferior_pid, pending_follow.execd_pathname); - free (pending_follow.execd_pathname); + follow_exec (PIDGET (inferior_ptid), pending_follow.execd_pathname); + xfree (pending_follow.execd_pathname); } } +/* EXECD_PATHNAME is assumed to be non-NULL. 
*/ + static void follow_exec (int pid, char *execd_pathname) { @@ -660,13 +655,14 @@ follow_exec (int pid, char *execd_pathname) (pending_follow.kind == TARGET_WAITKIND_VFORKED)) { pending_follow.kind = TARGET_WAITKIND_SPURIOUS; - follow_vfork (inferior_pid, pending_follow.fork_event.child_pid); + follow_vfork (PIDGET (inferior_ptid), + pending_follow.fork_event.child_pid); follow_vfork_when_exec = 0; - saved_pid = inferior_pid; + saved_pid = PIDGET (inferior_ptid); /* Did we follow the parent? If so, we're done. If we followed the child then we must also follow its exec(). */ - if (inferior_pid == pending_follow.fork_event.parent_pid) + if (PIDGET (inferior_ptid) == pending_follow.fork_event.parent_pid) return; } @@ -715,14 +711,15 @@ follow_exec (int pid, char *execd_pathname) gdb_flush (gdb_stdout); target_mourn_inferior (); - inferior_pid = saved_pid; /* Because mourn_inferior resets inferior_pid. */ + inferior_ptid = pid_to_ptid (saved_pid); + /* Because mourn_inferior resets inferior_ptid. */ push_target (tgt); /* That a.out is now the one to use. */ exec_file_attach (execd_pathname, 0); /* And also is where symbols can be found. */ - symbol_file_command (execd_pathname, 0); + symbol_file_add_main (execd_pathname, 0); /* Reset the shared library package. This ensures that we get a shlib event when the child reaches "_start", at which point @@ -731,7 +728,7 @@ follow_exec (int pid, char *execd_pathname) SOLIB_RESTART (); #endif #ifdef SOLIB_CREATE_INFERIOR_HOOK - SOLIB_CREATE_INFERIOR_HOOK (inferior_pid); + SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid)); #endif /* Reinsert all breakpoints. (Those which were symbolic have @@ -784,8 +781,6 @@ set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c) } - - /* Resume the inferior, but allow a QUIT. This is useful if the user wants to interrupt some lengthy single-stepping operation (for child processes, the SIGINT goes to the inferior, and so @@ -801,13 +796,8 @@ resume (int step, enum target_signal sig) struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0); QUIT; -#ifdef CANNOT_STEP_BREAKPOINT - /* Most targets can step a breakpoint instruction, thus executing it - normally. But if this one cannot, just continue and we will hit - it anyway. */ - if (step && breakpoints_inserted && breakpoint_here_p (read_pc ())) - step = 0; -#endif + /* FIXME: calling breakpoint_here_p (read_pc ()) three times! */ + /* Some targets (e.g. 
Solaris x86) have a kernel bug when stepping over an instruction that causes a page fault without triggering @@ -828,7 +818,7 @@ resume (int step, enum target_signal sig) if (breakpoint_here_p (read_pc ()) == permanent_breakpoint_here) SKIP_PERMANENT_BREAKPOINT (); - if (SOFTWARE_SINGLE_STEP_P && step) + if (SOFTWARE_SINGLE_STEP_P () && step) { /* Do it the hard way, w/temp breakpoints */ SOFTWARE_SINGLE_STEP (sig, 1 /*insert-breakpoints */ ); @@ -850,7 +840,8 @@ resume (int step, enum target_signal sig) { case (TARGET_WAITKIND_FORKED): pending_follow.kind = TARGET_WAITKIND_SPURIOUS; - follow_fork (inferior_pid, pending_follow.fork_event.child_pid); + follow_fork (PIDGET (inferior_ptid), + pending_follow.fork_event.child_pid); break; case (TARGET_WAITKIND_VFORKED): @@ -858,14 +849,15 @@ resume (int step, enum target_signal sig) int saw_child_exec = pending_follow.fork_event.saw_child_exec; pending_follow.kind = TARGET_WAITKIND_SPURIOUS; - follow_vfork (inferior_pid, pending_follow.fork_event.child_pid); + follow_vfork (PIDGET (inferior_ptid), + pending_follow.fork_event.child_pid); /* Did we follow the child, but not yet see the child's exec event? If so, then it actually ought to be waiting for us; we respond to parent vfork events. We don't actually want to resume the child in this situation; we want to just get its exec event. */ if (!saw_child_exec && - (inferior_pid == pending_follow.fork_event.child_pid)) + (PIDGET (inferior_ptid) == pending_follow.fork_event.child_pid)) should_resume = 0; } break; @@ -886,44 +878,37 @@ resume (int step, enum target_signal sig) if (should_resume) { - int resume_pid; + ptid_t resume_ptid; + + resume_ptid = RESUME_ALL; /* Default */ - if (use_thread_step_needed && thread_step_needed) + if ((step || singlestep_breakpoints_inserted_p) && + !breakpoints_inserted && breakpoint_here_p (read_pc ())) { - /* We stopped on a BPT instruction; - don't continue other threads and - just step this thread. */ - thread_step_needed = 0; + /* Stepping past a breakpoint without inserting breakpoints. + Make sure only the current thread gets to step, so that + other threads don't sneak past breakpoints while they are + not inserted. */ - if (!breakpoint_here_p (read_pc ())) - { - /* Breakpoint deleted: ok to do regular resume - where all the threads either step or continue. */ - resume_pid = -1; - } - else - { - if (!step) - { - warning ("Internal error, changing continue to step."); - remove_breakpoints (); - breakpoints_inserted = 0; - trap_expected = 1; - step = 1; - } - resume_pid = inferior_pid; - } + resume_ptid = inferior_ptid; } - else + + if ((scheduler_mode == schedlock_on) || + (scheduler_mode == schedlock_step && + (step || singlestep_breakpoints_inserted_p))) { - /* Vanilla resume. */ - if ((scheduler_mode == schedlock_on) || - (scheduler_mode == schedlock_step && step != 0)) - resume_pid = inferior_pid; - else - resume_pid = -1; + /* User-settable 'scheduler' mode requires solo thread resume. */ + resume_ptid = inferior_ptid; } - target_resume (resume_pid, step, sig); + +#ifdef CANNOT_STEP_BREAKPOINT + /* Most targets can step a breakpoint instruction, thus executing it + normally. But if this one cannot, just continue and we will hit + it anyway. 
*/ + if (step && breakpoints_inserted && breakpoint_here_p (read_pc ())) + step = 0; +#endif + target_resume (resume_ptid, step, sig); } discard_cleanups (old_cleanups); @@ -940,7 +925,7 @@ clear_proceed_status (void) step_range_start = 0; step_range_end = 0; step_frame_address = 0; - step_over_calls = -1; + step_over_calls = STEP_OVER_UNDEBUGGABLE; stop_after_trap = 0; stop_soon_quietly = 0; proceed_to_finish = 0; @@ -997,16 +982,6 @@ proceed (CORE_ADDR addr, enum target_signal siggnal, int step) else { write_pc (addr); - - /* New address; we don't need to single-step a thread - over a breakpoint we just hit, 'cause we aren't - continuing from there. - - It's not worth worrying about the case where a user - asks for a "jump" at the current PC--if they get the - hiccup of re-hiting a hit breakpoint, what else do - they expect? */ - thread_step_needed = 0; } #ifdef PREPARE_TO_PROCEED @@ -1024,7 +999,6 @@ proceed (CORE_ADDR addr, enum target_signal siggnal, int step) if (PREPARE_TO_PROCEED (1) && breakpoint_here_p (read_pc ())) { oneproc = 1; - thread_step_needed = 1; } #endif /* PREPARE_TO_PROCEED */ @@ -1108,7 +1082,7 @@ start_remote (void) /* Always go on waiting for the target, regardless of the mode. */ /* FIXME: cagney/1999-09-23: At present it isn't possible to - indicate th wait_for_inferior that a target should timeout if + indicate to wait_for_inferior that a target should timeout if nothing is returned (instead of just blocking). Because of this, targets expecting an immediate response need to, internally, set things up so that the target_wait() is forced to eventually @@ -1214,8 +1188,8 @@ struct execution_control_state int current_line; struct symtab *current_symtab; int handling_longjmp; /* FIXME */ - int pid; - int saved_inferior_pid; + ptid_t ptid; + ptid_t saved_inferior_ptid; int update_step_sp; int stepping_through_solib_after_catch; bpstat stepping_through_solib_catchpoints; @@ -1224,7 +1198,7 @@ struct execution_control_state int new_thread_event; struct target_waitstatus tmpstatus; enum infwait_states infwait_state; - int waiton_pid; + ptid_t waiton_ptid; int wait_some_more; }; @@ -1253,7 +1227,7 @@ wait_for_inferior (void) struct execution_control_state ecss; struct execution_control_state *ecs; - old_cleanups = make_cleanup (delete_breakpoint_current_contents, + old_cleanups = make_cleanup (delete_step_resume_breakpoint, &step_resume_breakpoint); make_cleanup (delete_breakpoint_current_contents, &through_sigtramp_breakpoint); @@ -1265,10 +1239,8 @@ wait_for_inferior (void) /* Fill in with reasonable starting values. */ init_execution_control_state (ecs); - thread_step_needed = 0; - /* We'll update this if & when we switch to a new thread. */ - previous_inferior_pid = inferior_pid; + previous_inferior_ptid = inferior_ptid; overlay_cache_invalid = 1; @@ -1283,9 +1255,9 @@ wait_for_inferior (void) while (1) { if (target_wait_hook) - ecs->pid = target_wait_hook (ecs->waiton_pid, ecs->wp); + ecs->ptid = target_wait_hook (ecs->waiton_ptid, ecs->wp); else - ecs->pid = target_wait (ecs->waiton_pid, ecs->wp); + ecs->ptid = target_wait (ecs->waiton_ptid, ecs->wp); /* Now figure out what to do with the result of the result. 
*/ handle_inferior_event (ecs); @@ -1309,8 +1281,7 @@ struct execution_control_state async_ecss; struct execution_control_state *async_ecs; void -fetch_inferior_event (client_data) - void *client_data; +fetch_inferior_event (void *client_data) { static struct cleanup *old_cleanups; @@ -1318,7 +1289,7 @@ fetch_inferior_event (client_data) if (!async_ecs->wait_some_more) { - old_cleanups = make_exec_cleanup (delete_breakpoint_current_contents, + old_cleanups = make_exec_cleanup (delete_step_resume_breakpoint, &step_resume_breakpoint); make_exec_cleanup (delete_breakpoint_current_contents, &through_sigtramp_breakpoint); @@ -1326,10 +1297,8 @@ fetch_inferior_event (client_data) /* Fill in with reasonable starting values. */ init_execution_control_state (async_ecs); - thread_step_needed = 0; - /* We'll update this if & when we switch to a new thread. */ - previous_inferior_pid = inferior_pid; + previous_inferior_ptid = inferior_ptid; overlay_cache_invalid = 1; @@ -1343,9 +1312,9 @@ fetch_inferior_event (client_data) } if (target_wait_hook) - async_ecs->pid = target_wait_hook (async_ecs->waiton_pid, async_ecs->wp); + async_ecs->ptid = target_wait_hook (async_ecs->waiton_ptid, async_ecs->wp); else - async_ecs->pid = target_wait (async_ecs->waiton_pid, async_ecs->wp); + async_ecs->ptid = target_wait (async_ecs->waiton_ptid, async_ecs->wp); /* Now figure out what to do with the result of the result. */ handle_inferior_event (async_ecs); @@ -1383,7 +1352,7 @@ init_execution_control_state (struct execution_control_state *ecs) ecs->current_line = ecs->sal.line; ecs->current_symtab = ecs->sal.symtab; ecs->infwait_state = infwait_normal_state; - ecs->waiton_pid = -1; + ecs->waiton_ptid = pid_to_ptid (-1); ecs->wp = &(ecs->ws); } @@ -1398,6 +1367,61 @@ check_for_old_step_resume_breakpoint (void) warning ("GDB bug: infrun.c (wait_for_inferior): dropping old step_resume breakpoint"); } +/* Return the cached copy of the last pid/waitstatus returned by + target_wait()/target_wait_hook(). The data is actually cached by + handle_inferior_event(), which gets called immediately after + target_wait()/target_wait_hook(). */ + +void +get_last_target_status(ptid_t *ptidp, struct target_waitstatus *status) +{ + *ptidp = target_last_wait_ptid; + *status = target_last_waitstatus; +} + +/* Switch thread contexts, maintaining "infrun state". */ + +static void +context_switch (struct execution_control_state *ecs) +{ + /* Caution: it may happen that the new thread (or the old one!) + is not in the thread list. In this case we must not attempt + to "switch context", or we run the risk that our context may + be lost. This may happen as a result of the target module + mishandling thread creation. */ + + if (in_thread_list (inferior_ptid) && in_thread_list (ecs->ptid)) + { /* Perform infrun state context switch: */ + /* Save infrun state for the old thread. */ + save_infrun_state (inferior_ptid, prev_pc, + prev_func_start, prev_func_name, + trap_expected, step_resume_breakpoint, + through_sigtramp_breakpoint, step_range_start, + step_range_end, step_frame_address, + ecs->handling_longjmp, ecs->another_trap, + ecs->stepping_through_solib_after_catch, + ecs->stepping_through_solib_catchpoints, + ecs->stepping_through_sigtramp, + ecs->current_line, ecs->current_symtab, + step_sp); + + /* Load infrun state for the new thread. 
*/ + load_infrun_state (ecs->ptid, &prev_pc, + &prev_func_start, &prev_func_name, + &trap_expected, &step_resume_breakpoint, + &through_sigtramp_breakpoint, &step_range_start, + &step_range_end, &step_frame_address, + &ecs->handling_longjmp, &ecs->another_trap, + &ecs->stepping_through_solib_after_catch, + &ecs->stepping_through_solib_catchpoints, + &ecs->stepping_through_sigtramp, + &ecs->current_line, &ecs->current_symtab, + &step_sp); + } + inferior_ptid = ecs->ptid; +} + + /* Given an execution control state that has been freshly filled in by an event from the inferior, figure out what it means and take appropriate action. */ @@ -1408,41 +1432,30 @@ handle_inferior_event (struct execution_control_state *ecs) CORE_ADDR tmp; int stepped_after_stopped_by_watchpoint; + /* Cache the last pid/waitstatus. */ + target_last_wait_ptid = ecs->ptid; + target_last_waitstatus = *ecs->wp; + /* Keep this extra brace for now, minimizes diffs. */ { switch (ecs->infwait_state) { - case infwait_normal_state: - /* Since we've done a wait, we have a new event. Don't - carry over any expectations about needing to step over a - breakpoint. */ - thread_step_needed = 0; + case infwait_thread_hop_state: + /* Cancel the waiton_ptid. */ + ecs->waiton_ptid = pid_to_ptid (-1); + /* Fall thru to the normal_state case. */ + case infwait_normal_state: /* See comments where a TARGET_WAITKIND_SYSCALL_RETURN event is serviced in this loop, below. */ if (ecs->enable_hw_watchpoints_after_wait) { - TARGET_ENABLE_HW_WATCHPOINTS (inferior_pid); + TARGET_ENABLE_HW_WATCHPOINTS (PIDGET (inferior_ptid)); ecs->enable_hw_watchpoints_after_wait = 0; } stepped_after_stopped_by_watchpoint = 0; break; - case infwait_thread_hop_state: - insert_breakpoints (); - - /* We need to restart all the threads now, - * unles we're running in scheduler-locked mode. - * FIXME: shouldn't we look at currently_stepping ()? - */ - if (scheduler_mode == schedlock_on) - target_resume (ecs->pid, 0, TARGET_SIGNAL_0); - else - target_resume (-1, 0, TARGET_SIGNAL_0); - ecs->infwait_state = infwait_normal_state; - prepare_to_wait (ecs); - return; - case infwait_nullified_state: break; @@ -1461,20 +1474,21 @@ handle_inferior_event (struct execution_control_state *ecs) /* If it's a new process, add it to the thread database */ - ecs->new_thread_event = ((ecs->pid != inferior_pid) && !in_thread_list (ecs->pid)); + ecs->new_thread_event = (! ptid_equal (ecs->ptid, inferior_ptid) + && ! in_thread_list (ecs->ptid)); if (ecs->ws.kind != TARGET_WAITKIND_EXITED && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event) { - add_thread (ecs->pid); + add_thread (ecs->ptid); #ifdef UI_OUT ui_out_text (uiout, "[New "); - ui_out_text (uiout, target_pid_or_tid_to_str (ecs->pid)); + ui_out_text (uiout, target_pid_or_tid_to_str (ecs->ptid)); ui_out_text (uiout, "]\n"); #else - printf_filtered ("[New %s]\n", target_pid_or_tid_to_str (ecs->pid)); + printf_filtered ("[New %s]\n", target_pid_or_tid_to_str (ecs->ptid)); #endif #if 0 @@ -1497,7 +1511,7 @@ handle_inferior_event (struct execution_control_state *ecs) Therefore we need to continue all threads in order to make progress. 
*/ - target_resume (-1, 0, TARGET_SIGNAL_0); + target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0); prepare_to_wait (ecs); return; #endif @@ -1553,7 +1567,7 @@ handle_inferior_event (struct execution_control_state *ecs) (LONGEST) ecs->ws.value.integer)); gdb_flush (gdb_stdout); target_mourn_inferior (); - singlestep_breakpoints_inserted_p = 0; /*SOFTWARE_SINGLE_STEP_P */ + singlestep_breakpoints_inserted_p = 0; /*SOFTWARE_SINGLE_STEP_P() */ stop_print_frame = 0; stop_stepping (ecs); return; @@ -1571,7 +1585,7 @@ handle_inferior_event (struct execution_control_state *ecs) target_mourn_inferior (); print_stop_reason (SIGNAL_EXITED, stop_signal); - singlestep_breakpoints_inserted_p = 0; /*SOFTWARE_SINGLE_STEP_P */ + singlestep_breakpoints_inserted_p = 0; /*SOFTWARE_SINGLE_STEP_P() */ stop_stepping (ecs); return; @@ -1585,10 +1599,10 @@ handle_inferior_event (struct execution_control_state *ecs) interested in reacting to forks of the child. Note that we expect the child's fork event to be available if we waited for it now. */ - if (inferior_pid == ecs->pid) + if (ptid_equal (inferior_ptid, ecs->ptid)) { pending_follow.fork_event.saw_parent_fork = 1; - pending_follow.fork_event.parent_pid = ecs->pid; + pending_follow.fork_event.parent_pid = PIDGET (ecs->ptid); pending_follow.fork_event.child_pid = ecs->ws.value.related_pid; prepare_to_wait (ecs); return; @@ -1596,16 +1610,16 @@ handle_inferior_event (struct execution_control_state *ecs) else { pending_follow.fork_event.saw_child_fork = 1; - pending_follow.fork_event.child_pid = ecs->pid; + pending_follow.fork_event.child_pid = PIDGET (ecs->ptid); pending_follow.fork_event.parent_pid = ecs->ws.value.related_pid; } - stop_pc = read_pc_pid (ecs->pid); - ecs->saved_inferior_pid = inferior_pid; - inferior_pid = ecs->pid; + stop_pc = read_pc_pid (ecs->ptid); + ecs->saved_inferior_ptid = inferior_ptid; + inferior_ptid = ecs->ptid; stop_bpstat = bpstat_stop_status (&stop_pc, currently_stepping (ecs)); ecs->random_signal = !bpstat_explains_signal (stop_bpstat); - inferior_pid = ecs->saved_inferior_pid; + inferior_ptid = ecs->saved_inferior_ptid; goto process_event_stop_test; /* If this a platform which doesn't allow a debugger to touch a @@ -1625,10 +1639,10 @@ handle_inferior_event (struct execution_control_state *ecs) it execs, and the child has not yet exec'd. We probably should warn the user to that effect when the catchpoint triggers...) 
*/ - if (ecs->pid == inferior_pid) + if (ptid_equal (ecs->ptid, inferior_ptid)) { pending_follow.fork_event.saw_parent_fork = 1; - pending_follow.fork_event.parent_pid = ecs->pid; + pending_follow.fork_event.parent_pid = PIDGET (ecs->ptid); pending_follow.fork_event.child_pid = ecs->ws.value.related_pid; } @@ -1638,13 +1652,14 @@ handle_inferior_event (struct execution_control_state *ecs) else { pending_follow.fork_event.saw_child_fork = 1; - pending_follow.fork_event.child_pid = ecs->pid; + pending_follow.fork_event.child_pid = PIDGET (ecs->ptid); pending_follow.fork_event.parent_pid = ecs->ws.value.related_pid; - target_post_startup_inferior (pending_follow.fork_event.child_pid); + target_post_startup_inferior ( + pid_to_ptid (pending_follow.fork_event.child_pid)); follow_vfork_when_exec = !target_can_follow_vfork_prior_to_exec (); if (follow_vfork_when_exec) { - target_resume (ecs->pid, 0, TARGET_SIGNAL_0); + target_resume (ecs->ptid, 0, TARGET_SIGNAL_0); prepare_to_wait (ecs); return; } @@ -1667,7 +1682,7 @@ handle_inferior_event (struct execution_control_state *ecs) inferior_ignoring_leading_exec_events--; if (pending_follow.kind == TARGET_WAITKIND_VFORKED) ENSURE_VFORKING_PARENT_REMAINS_STOPPED (pending_follow.fork_event.parent_pid); - target_resume (ecs->pid, 0, TARGET_SIGNAL_0); + target_resume (ecs->ptid, 0, TARGET_SIGNAL_0); prepare_to_wait (ecs); return; } @@ -1678,7 +1693,7 @@ handle_inferior_event (struct execution_control_state *ecs) savestring (ecs->ws.value.execd_pathname, strlen (ecs->ws.value.execd_pathname)); - /* Did inferior_pid exec, or did a (possibly not-yet-followed) + /* Did inferior_ptid exec, or did a (possibly not-yet-followed) child of a vfork exec? ??rehrauer: This is unabashedly an HP-UX specific thing. On @@ -1702,7 +1717,7 @@ handle_inferior_event (struct execution_control_state *ecs) the parent vfork event is delivered. A single-step suffices. */ if (RESUME_EXECD_VFORKING_CHILD_TO_GET_PARENT_VFORK ()) - target_resume (ecs->pid, 1, TARGET_SIGNAL_0); + target_resume (ecs->ptid, 1, TARGET_SIGNAL_0); /* We expect the parent vfork event to be available now. */ prepare_to_wait (ecs); return; @@ -1710,15 +1725,15 @@ handle_inferior_event (struct execution_control_state *ecs) /* This causes the eventpoints and symbol table to be reset. Must do this now, before trying to determine whether to stop. 
*/ - follow_exec (inferior_pid, pending_follow.execd_pathname); - free (pending_follow.execd_pathname); + follow_exec (PIDGET (inferior_ptid), pending_follow.execd_pathname); + xfree (pending_follow.execd_pathname); - stop_pc = read_pc_pid (ecs->pid); - ecs->saved_inferior_pid = inferior_pid; - inferior_pid = ecs->pid; + stop_pc = read_pc_pid (ecs->ptid); + ecs->saved_inferior_ptid = inferior_ptid; + inferior_ptid = ecs->ptid; stop_bpstat = bpstat_stop_status (&stop_pc, currently_stepping (ecs)); ecs->random_signal = !bpstat_explains_signal (stop_bpstat); - inferior_pid = ecs->saved_inferior_pid; + inferior_ptid = ecs->saved_inferior_ptid; goto process_event_stop_test; /* These syscall events are returned on HP-UX, as part of its @@ -1742,7 +1757,7 @@ handle_inferior_event (struct execution_control_state *ecs) number_of_threads_in_syscalls++; if (number_of_threads_in_syscalls == 1) { - TARGET_DISABLE_HW_WATCHPOINTS (inferior_pid); + TARGET_DISABLE_HW_WATCHPOINTS (PIDGET (inferior_ptid)); } resume (0, TARGET_SIGNAL_0); prepare_to_wait (ecs); @@ -1763,7 +1778,7 @@ handle_inferior_event (struct execution_control_state *ecs) here, which will be serviced immediately after the target is waited on. */ case TARGET_WAITKIND_SYSCALL_RETURN: - target_resume (ecs->pid, 1, TARGET_SIGNAL_0); + target_resume (ecs->ptid, 1, TARGET_SIGNAL_0); if (number_of_threads_in_syscalls > 0) { @@ -1799,12 +1814,12 @@ handle_inferior_event (struct execution_control_state *ecs) all threads in order to make progress. */ if (ecs->new_thread_event) { - target_resume (-1, 0, TARGET_SIGNAL_0); + target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0); prepare_to_wait (ecs); return; } - stop_pc = read_pc_pid (ecs->pid); + stop_pc = read_pc_pid (ecs->ptid); /* See if a thread hit a thread-specific breakpoint that was meant for another thread. If so, then step that thread past the breakpoint, @@ -1812,20 +1827,20 @@ handle_inferior_event (struct execution_control_state *ecs) if (stop_signal == TARGET_SIGNAL_TRAP) { - if (SOFTWARE_SINGLE_STEP_P && singlestep_breakpoints_inserted_p) + if (SOFTWARE_SINGLE_STEP_P () && singlestep_breakpoints_inserted_p) ecs->random_signal = 0; else if (breakpoints_inserted && breakpoint_here_p (stop_pc - DECR_PC_AFTER_BREAK)) { ecs->random_signal = 0; if (!breakpoint_thread_match (stop_pc - DECR_PC_AFTER_BREAK, - ecs->pid)) + ecs->ptid)) { int remove_status; /* Saw a breakpoint, but it was hit by the wrong thread. Just continue. */ - write_pc_pid (stop_pc - DECR_PC_AFTER_BREAK, ecs->pid); + write_pc_pid (stop_pc - DECR_PC_AFTER_BREAK, ecs->ptid); remove_status = remove_breakpoints (); /* Did we fail to remove breakpoints? If so, try @@ -1837,39 +1852,38 @@ handle_inferior_event (struct execution_control_state *ecs) then either :-) or execs. */ if (remove_status != 0) { - write_pc_pid (stop_pc - DECR_PC_AFTER_BREAK + 4, ecs->pid); + write_pc_pid (stop_pc - DECR_PC_AFTER_BREAK + 4, ecs->ptid); + /* We need to restart all the threads now, + * unles we're running in scheduler-locked mode. + * Use currently_stepping to determine whether to + * step or continue. + */ + /* FIXME MVS: is there any reason not to call resume()? */ + if (scheduler_mode == schedlock_on) + target_resume (ecs->ptid, + currently_stepping (ecs), + TARGET_SIGNAL_0); + else + target_resume (RESUME_ALL, + currently_stepping (ecs), + TARGET_SIGNAL_0); + prepare_to_wait (ecs); + return; } else { /* Single step */ - target_resume (ecs->pid, 1, TARGET_SIGNAL_0); - /* FIXME: What if a signal arrives instead of the - single-step happening? 
*/ - - ecs->waiton_pid = ecs->pid; + breakpoints_inserted = 0; + if (!ptid_equal (inferior_ptid, ecs->ptid)) + context_switch (ecs); + ecs->waiton_ptid = ecs->ptid; ecs->wp = &(ecs->ws); + ecs->another_trap = 1; + ecs->infwait_state = infwait_thread_hop_state; - prepare_to_wait (ecs); + keep_going (ecs); + registers_changed (); return; } - - /* We need to restart all the threads now, - * unles we're running in scheduler-locked mode. - * FIXME: shouldn't we look at currently_stepping ()? - */ - if (scheduler_mode == schedlock_on) - target_resume (ecs->pid, 0, TARGET_SIGNAL_0); - else - target_resume (-1, 0, TARGET_SIGNAL_0); - prepare_to_wait (ecs); - return; - } - else - { - /* This breakpoint matches--either it is the right - thread or it's a generic breakpoint for all threads. - Remember that we'll need to step just _this_ thread - on any following user continuation! */ - thread_step_needed = 1; } } } @@ -1883,7 +1897,7 @@ handle_inferior_event (struct execution_control_state *ecs) Note that if there's any kind of pending follow (i.e., of a fork, vfork or exec), we don't want to do this now. Rather, we'll let the next resume handle it. */ - if ((ecs->pid != inferior_pid) && + if (! ptid_equal (ecs->ptid, inferior_ptid) && (pending_follow.kind == TARGET_WAITKIND_SPURIOUS)) { int printed = 0; @@ -1926,7 +1940,7 @@ handle_inferior_event (struct execution_control_state *ecs) if (signal_program[stop_signal] == 0) stop_signal = TARGET_SIGNAL_0; - target_resume (ecs->pid, 0, stop_signal); + target_resume (ecs->ptid, 0, stop_signal); prepare_to_wait (ecs); return; } @@ -1934,48 +1948,15 @@ handle_inferior_event (struct execution_control_state *ecs) /* It's a SIGTRAP or a signal we're interested in. Switch threads, and fall into the rest of wait_for_inferior(). */ - /* Caution: it may happen that the new thread (or the old one!) - is not in the thread list. In this case we must not attempt - to "switch context", or we run the risk that our context may - be lost. This may happen as a result of the target module - mishandling thread creation. */ - - if (in_thread_list (inferior_pid) && in_thread_list (ecs->pid)) - { /* Perform infrun state context switch: */ - /* Save infrun state for the old thread. */ - save_infrun_state (inferior_pid, prev_pc, - prev_func_start, prev_func_name, - trap_expected, step_resume_breakpoint, - through_sigtramp_breakpoint, - step_range_start, step_range_end, - step_frame_address, ecs->handling_longjmp, - ecs->another_trap, - ecs->stepping_through_solib_after_catch, - ecs->stepping_through_solib_catchpoints, - ecs->stepping_through_sigtramp); - - /* Load infrun state for the new thread. */ - load_infrun_state (ecs->pid, &prev_pc, - &prev_func_start, &prev_func_name, - &trap_expected, &step_resume_breakpoint, - &through_sigtramp_breakpoint, - &step_range_start, &step_range_end, - &step_frame_address, &ecs->handling_longjmp, - &ecs->another_trap, - &ecs->stepping_through_solib_after_catch, - &ecs->stepping_through_solib_catchpoints, - &ecs->stepping_through_sigtramp); - } - - inferior_pid = ecs->pid; + context_switch (ecs); if (context_hook) - context_hook (pid_to_thread_id (ecs->pid)); + context_hook (pid_to_thread_id (ecs->ptid)); flush_cached_frames (); } - if (SOFTWARE_SINGLE_STEP_P && singlestep_breakpoints_inserted_p) + if (SOFTWARE_SINGLE_STEP_P () && singlestep_breakpoints_inserted_p) { /* Pull the single step breakpoints out of the target. 
*/ SOFTWARE_SINGLE_STEP (0, 0); @@ -1990,14 +1971,14 @@ handle_inferior_event (struct execution_control_state *ecs) if (INSTRUCTION_NULLIFIED) { registers_changed (); - target_resume (ecs->pid, 1, TARGET_SIGNAL_0); + target_resume (ecs->ptid, 1, TARGET_SIGNAL_0); /* We may have received a signal that we want to pass to the inferior; therefore, we must not clobber the waitstatus in WS. */ ecs->infwait_state = infwait_nullified_state; - ecs->waiton_pid = ecs->pid; + ecs->waiton_ptid = ecs->ptid; ecs->wp = &(ecs->tmpstatus); prepare_to_wait (ecs); return; @@ -2039,9 +2020,9 @@ handle_inferior_event (struct execution_control_state *ecs) remove_breakpoints (); registers_changed (); - target_resume (ecs->pid, 1, TARGET_SIGNAL_0); /* Single step */ + target_resume (ecs->ptid, 1, TARGET_SIGNAL_0); /* Single step */ - ecs->waiton_pid = ecs->pid; + ecs->waiton_ptid = ecs->ptid; ecs->wp = &(ecs->ws); ecs->infwait_state = infwait_nonstep_watch_state; prepare_to_wait (ecs); @@ -2313,8 +2294,7 @@ handle_inferior_event (struct execution_control_state *ecs) interferes with us */ if (step_resume_breakpoint != NULL) { - delete_breakpoint (step_resume_breakpoint); - step_resume_breakpoint = NULL; + delete_step_resume_breakpoint (&step_resume_breakpoint); } /* Not sure whether we need to blow this away too, but probably it is like the step-resume breakpoint. */ @@ -2360,7 +2340,6 @@ handle_inferior_event (struct execution_control_state *ecs) case BPSTAT_WHAT_SINGLE: if (breakpoints_inserted) { - thread_step_needed = 1; remove_breakpoints (); } breakpoints_inserted = 0; @@ -2412,8 +2391,7 @@ handle_inferior_event (struct execution_control_state *ecs) step_resume_breakpoint = bpstat_find_step_resume_breakpoint (stop_bpstat); } - delete_breakpoint (step_resume_breakpoint); - step_resume_breakpoint = NULL; + delete_step_resume_breakpoint (&step_resume_breakpoint); break; case BPSTAT_WHAT_THROUGH_SIGTRAMP: @@ -2527,7 +2505,7 @@ handle_inferior_event (struct execution_control_state *ecs) { #if defined(SOLIB_ADD) /* Have we reached our destination? If not, keep going. */ - if (SOLIB_IN_DYNAMIC_LINKER (ecs->pid, stop_pc)) + if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc)) { ecs->another_trap = 1; keep_going (ecs); @@ -2613,7 +2591,7 @@ handle_inferior_event (struct execution_control_state *ecs) loader dynamic symbol resolution code, we keep on single stepping until we exit the run time loader code and reach the callee's address. */ - if (step_over_calls < 0 && IN_SOLIB_DYNSYM_RESOLVE_CODE (stop_pc)) + if (step_over_calls == STEP_OVER_UNDEBUGGABLE && IN_SOLIB_DYNSYM_RESOLVE_CODE (stop_pc)) { CORE_ADDR pc_after_resolver = SKIP_SOLIB_RESOLVER (stop_pc); @@ -2734,7 +2712,7 @@ handle_inferior_event (struct execution_control_state *ecs) { /* It's a subroutine call. */ - if (step_over_calls == 0) + if (step_over_calls == STEP_OVER_NONE) { /* I presume that step_over_calls is only 0 when we're supposed to be stepping at the assembly language level @@ -2745,7 +2723,7 @@ handle_inferior_event (struct execution_control_state *ecs) return; } - if (step_over_calls > 0 || IGNORE_HELPER_CALL (stop_pc)) + if (step_over_calls == STEP_OVER_ALL || IGNORE_HELPER_CALL (stop_pc)) { /* We're doing a "next". */ @@ -2811,6 +2789,18 @@ handle_inferior_event (struct execution_control_state *ecs) return; } } + + /* If we have no line number and the step-stop-if-no-debug + is set, we stop the step so that the user has a chance to + switch in assembly mode. 
*/ + if (step_over_calls == STEP_OVER_UNDEBUGGABLE && step_stop_if_no_debug) + { + stop_step = 1; + print_stop_reason (END_STEPPING_RANGE, 0); + stop_stepping (ecs); + return; + } + step_over_function (ecs); keep_going (ecs); return; @@ -3076,26 +3066,26 @@ stop_stepping (struct execution_control_state *ecs) { /* Are we stopping for a vfork event? We only stop when we see the child's event. However, we may not yet have seen the - parent's event. And, inferior_pid is still set to the + parent's event. And, inferior_ptid is still set to the parent's pid, until we resume again and follow either the parent or child. - To ensure that we can really touch inferior_pid (aka, the + To ensure that we can really touch inferior_ptid (aka, the parent process) -- which calls to functions like read_pc implicitly do -- wait on the parent if necessary. */ if ((pending_follow.kind == TARGET_WAITKIND_VFORKED) && !pending_follow.fork_event.saw_parent_fork) { - int parent_pid; + ptid_t parent_ptid; do { if (target_wait_hook) - parent_pid = target_wait_hook (-1, &(ecs->ws)); + parent_ptid = target_wait_hook (pid_to_ptid (-1), &(ecs->ws)); else - parent_pid = target_wait (-1, &(ecs->ws)); + parent_ptid = target_wait (pid_to_ptid (-1), &(ecs->ws)); } - while (parent_pid != inferior_pid); + while (! ptid_equal (parent_ptid, inferior_ptid)); } /* Assuming the inferior still exists, set these up for next @@ -3240,7 +3230,7 @@ prepare_to_wait (struct execution_control_state *ecs) as part of their normal status mechanism. */ registers_changed (); - ecs->waiton_pid = -1; + ecs->waiton_ptid = pid_to_ptid (-1); ecs->wp = &(ecs->ws); } /* This is the old end of the while loop. Let everybody know we @@ -3270,7 +3260,7 @@ print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info) /* Print a message only if not in the middle of doing a "step n" operation for n > 1 */ if (!step_multi || !stop_step) - if (interpreter_p && strcmp (interpreter_p, "mi") == 0) + if (interpreter_p && strncmp (interpreter_p, "mi", 2) == 0) ui_out_field_string (uiout, "reason", "end-stepping-range"); #endif break; @@ -3282,7 +3272,7 @@ print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info) /* The inferior was terminated by a signal. 
*/ #ifdef UI_OUT annotate_signalled (); - if (interpreter_p && strcmp (interpreter_p, "mi") == 0) + if (interpreter_p && strncmp (interpreter_p, "mi", 2) == 0) ui_out_field_string (uiout, "reason", "exited-signalled"); ui_out_text (uiout, "\nProgram terminated with signal "); annotate_signal_name (); @@ -3316,7 +3306,7 @@ print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info) annotate_exited (stop_info); if (stop_info) { - if (interpreter_p && strcmp (interpreter_p, "mi") == 0) + if (interpreter_p && strncmp (interpreter_p, "mi", 2) == 0) ui_out_field_string (uiout, "reason", "exited"); ui_out_text (uiout, "\nProgram exited with code "); ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) stop_info); @@ -3324,7 +3314,7 @@ print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info) } else { - if (interpreter_p && strcmp (interpreter_p, "mi") == 0) + if (interpreter_p && strncmp (interpreter_p, "mi", 2) == 0) ui_out_field_string (uiout, "reason", "exited-normally"); ui_out_text (uiout, "\nProgram exited normally.\n"); } @@ -3366,7 +3356,8 @@ print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info) #endif break; default: - internal_error ("print_stop_reason: unrecognized enum value"); + internal_error (__FILE__, __LINE__, + "print_stop_reason: unrecognized enum value"); break; } } @@ -3389,13 +3380,13 @@ normal_stop (void) (Note that there's no point in saying anything if the inferior has exited!) */ - if ((previous_inferior_pid != inferior_pid) + if (! ptid_equal (previous_inferior_ptid, inferior_ptid) && target_has_execution) { target_terminal_ours_for_output (); printf_filtered ("[Switching to %s]\n", - target_pid_or_tid_to_str (inferior_pid)); - previous_inferior_pid = inferior_pid; + target_pid_or_tid_to_str (inferior_ptid)); + previous_inferior_ptid = inferior_ptid; } /* Make sure that the current_frame's pc is correct. This @@ -3447,9 +3438,9 @@ and/or watchpoints.\n"); /* Look up the hook_stop and run it if it exists. */ - if (stop_command && stop_command->hook) + if (stop_command && stop_command->hook_pre) { - catch_errors (hook_stop_stub, stop_command->hook, + catch_errors (hook_stop_stub, stop_command->hook_pre, "Error while running hook_stop:\n", RETURN_MASK_ALL); } @@ -3499,21 +3490,24 @@ and/or watchpoints.\n"); source_flag = SRC_LINE; break; case PRINT_NOTHING: + source_flag = SRC_LINE; /* something bogus */ do_frame_printing = 0; break; default: - internal_error ("Unknown value."); + internal_error (__FILE__, __LINE__, + "Unknown value."); } #ifdef UI_OUT /* For mi, have the same behavior every time we stop: print everything but the source line. */ - if (interpreter_p && strcmp (interpreter_p, "mi") == 0) + if (interpreter_p && strncmp (interpreter_p, "mi", 2) == 0) source_flag = LOC_AND_ADDRESS; #endif #ifdef UI_OUT - if (interpreter_p && strcmp (interpreter_p, "mi") == 0) - ui_out_field_int (uiout, "thread-id", pid_to_thread_id (inferior_pid)); + if (interpreter_p && strncmp (interpreter_p, "mi", 2) == 0) + ui_out_field_int (uiout, "thread-id", + pid_to_thread_id (inferior_ptid)); #endif /* The behavior of this routine with respect to the source flag is: @@ -3796,7 +3790,7 @@ Are you sure you want to change it? 
", argv++; } - target_notice_signals (inferior_pid); + target_notice_signals (inferior_ptid); if (from_tty) { @@ -3875,7 +3869,7 @@ xdb_handle_command (char *args, int from_tty) else printf_filtered ("Invalid signal handling flag.\n"); if (argBuf) - free (argBuf); + xfree (argBuf); } } do_cleanups (old_chain); @@ -3900,7 +3894,7 @@ signals_info (char *signum_exp, int from_tty) { /* No, try numeric. */ oursig = - target_signal_from_command (parse_and_eval_address (signum_exp)); + target_signal_from_command (parse_and_eval_long (signum_exp)); } sig_print_info (oursig); return; @@ -3935,7 +3929,7 @@ struct inferior_status CORE_ADDR step_range_start; CORE_ADDR step_range_end; CORE_ADDR step_frame_address; - int step_over_calls; + enum step_over_calls_kind step_over_calls; CORE_ADDR step_resume_break_address; int stop_after_trap; int stop_soon_quietly; @@ -3966,9 +3960,9 @@ xmalloc_inferior_status (void) static void free_inferior_status (struct inferior_status *inf_status) { - free (inf_status->registers); - free (inf_status->stop_registers); - free (inf_status); + xfree (inf_status->registers); + xfree (inf_status->stop_registers); + xfree (inf_status); } void @@ -4135,6 +4129,90 @@ discard_inferior_status (struct inferior_status *inf_status) free_inferior_status (inf_status); } +/* Oft used ptids */ +ptid_t null_ptid; +ptid_t minus_one_ptid; + +/* Create a ptid given the necessary PID, LWP, and TID components. */ + +ptid_t +ptid_build (int pid, long lwp, long tid) +{ + ptid_t ptid; + + ptid.pid = pid; + ptid.lwp = lwp; + ptid.tid = tid; + return ptid; +} + +/* Create a ptid from just a pid. */ + +ptid_t +pid_to_ptid (int pid) +{ + return ptid_build (pid, 0, 0); +} + +/* Fetch the pid (process id) component from a ptid. */ + +int +ptid_get_pid (ptid_t ptid) +{ + return ptid.pid; +} + +/* Fetch the lwp (lightweight process) component from a ptid. */ + +long +ptid_get_lwp (ptid_t ptid) +{ + return ptid.lwp; +} + +/* Fetch the tid (thread id) component from a ptid. */ + +long +ptid_get_tid (ptid_t ptid) +{ + return ptid.tid; +} + +/* ptid_equal() is used to test equality of two ptids. */ + +int +ptid_equal (ptid_t ptid1, ptid_t ptid2) +{ + return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp + && ptid1.tid == ptid2.tid); +} + +/* restore_inferior_ptid() will be used by the cleanup machinery + to restore the inferior_ptid value saved in a call to + save_inferior_ptid(). */ + +static void +restore_inferior_ptid (void *arg) +{ + ptid_t *saved_ptid_ptr = arg; + inferior_ptid = *saved_ptid_ptr; + xfree (arg); +} + +/* Save the value of inferior_ptid so that it may be restored by a + later call to do_cleanups(). Returns the struct cleanup pointer + needed for later doing the cleanup. */ + +struct cleanup * +save_inferior_ptid (void) +{ + ptid_t *saved_ptid_ptr; + + saved_ptid_ptr = xmalloc (sizeof (ptid_t)); + *saved_ptid_ptr = inferior_ptid; + return make_cleanup (restore_inferior_ptid, saved_ptid_ptr); +} + static void build_infrun (void) @@ -4310,4 +4388,19 @@ step == scheduler locked during every single-step operation.\n\ c->function.sfunc = set_schedlock_func; /* traps on target vector */ add_show_from_set (c, &showlist); + + c = add_set_cmd ("step-mode", class_run, + var_boolean, (char*) &step_stop_if_no_debug, +"Set mode of the step operation. When set, doing a step over a\n\ +function without debug line information will stop at the first\n\ +instruction of that function. 
Otherwise, the function is skipped and\n\ +the step command stops at a different source line.", + &setlist); + add_show_from_set (c, &showlist); + + /* ptid initializations */ + null_ptid = ptid_build (0, 0, 0); + minus_one_ptid = ptid_build (-1, 0, 0); + inferior_ptid = null_ptid; + target_last_wait_ptid = minus_one_ptid; }
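
The ptid_t helpers introduced at the bottom of this patch (ptid_build, pid_to_ptid, the ptid_get_* accessors and ptid_equal) are small enough to exercise in isolation.  The sketch below is a stand-alone approximation for illustration only: the ptid_t typedef itself is declared in a GDB header that is not part of this diff, so the struct layout here is an assumption inferred from ptid_build()'s parameters, and the pid/tid values used in main() are made up; only the helper bodies mirror what the patch adds.

/* Stand-alone sketch of the ptid_t helpers added by this patch.
   Assumption: the real typedef lives in a GDB header outside this
   diff; the layout below is inferred from ptid_build()'s parameters.  */

#include <stdio.h>

typedef struct ptid
  {
    int pid;			/* process id */
    long lwp;			/* lightweight process (kernel thread) id */
    long tid;			/* thread id */
  }
ptid_t;

/* Helper bodies mirroring the patch.  */

static ptid_t
ptid_build (int pid, long lwp, long tid)
{
  ptid_t ptid;

  ptid.pid = pid;
  ptid.lwp = lwp;
  ptid.tid = tid;
  return ptid;
}

static ptid_t
pid_to_ptid (int pid)
{
  return ptid_build (pid, 0, 0);
}

static int
ptid_equal (ptid_t ptid1, ptid_t ptid2)
{
  return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
	  && ptid1.tid == ptid2.tid);
}

/* Same definition as in the patch: resume every thread.  */
#define RESUME_ALL (pid_to_ptid (-1))

int
main (void)
{
  ptid_t minus_one_ptid = ptid_build (-1, 0, 0);
  ptid_t some_thread = ptid_build (1234, 0, 2);	/* hypothetical inferior */

  printf ("RESUME_ALL equals minus_one_ptid: %d\n",
	  ptid_equal (RESUME_ALL, minus_one_ptid));
  printf ("some_thread equals RESUME_ALL: %d\n",
	  ptid_equal (some_thread, RESUME_ALL));
  return 0;
}

The design point the patch leans on is that RESUME_ALL, defined as pid_to_ptid (-1), preserves the old convention in which a pid of -1 passed to target_resume() means "resume every thread", while individual wait events now carry a full (pid, lwp, tid) triple that ptid_equal() can compare exactly instead of comparing bare integer pids.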