jump pads). */
static int stabilizing_threads;
-static void linux_resume_one_lwp (struct lwp_info *lwp,
- int step, int signal, siginfo_t *info);
static void unsuspend_all_lwps (struct lwp_info *except);
static struct lwp_info *add_lwp (ptid_t ptid);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
-static void proceed_one_lwp (thread_info *thread, lwp_info *except);
/* When the event-loop is doing a step-over, this points at the thread
being stepped. */
return 0;
}
-/* True if the low target can software single-step. Such targets
- implement the GET_NEXT_PCS callback. */
+bool
+linux_process_target::low_supports_breakpoints ()
+{
+ return false;
+}
-static int
-can_software_single_step (void)
+CORE_ADDR
+linux_process_target::low_get_pc (regcache *regcache)
+{
+ return 0;
+}
+
+void
+linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
- return (the_low_target.get_next_pcs != NULL);
+ gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}
-/* True if the low target supports memory breakpoints. If so, we'll
- have a GET_PC implementation. */
+std::vector<CORE_ADDR>
+linux_process_target::low_get_next_pcs (regcache *regcache)
+{
+ gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
+ "implemented");
+}
-static int
-supports_breakpoints (void)
+int
+linux_process_target::low_decr_pc_after_break ()
{
- return (the_low_target.get_pc != NULL);
+ return 0;
}
/* Returns true if this target can support fast tracepoints. This
return proc;
}
-static CORE_ADDR get_pc (struct lwp_info *lwp);
-
void
linux_process_target::arch_setup_thread (thread_info *thread)
{
child_proc->attached = parent_proc->attached;
if (event_lwp->bp_reinsert != 0
- && can_software_single_step ()
+ && supports_software_single_step ()
&& event == PTRACE_EVENT_VFORK)
{
/* If we leave single-step breakpoints there, child will
In case of vfork, we'll reinsert them back once vforked
child is done. */
if (event_lwp->bp_reinsert != 0
- && can_software_single_step ())
+ && supports_software_single_step ())
{
/* The child process is forked and stopped, so it is safe
to access its memory without stopping all other threads
new_lwp = add_lwp (ptid);
/* Either we're going to immediately resume the new thread
- or leave it stopped. linux_resume_one_lwp is a nop if it
+ or leave it stopped. resume_one_lwp is a nop if it
thinks the thread is currently running, so set this first
- before calling linux_resume_one_lwp. */
+ before calling resume_one_lwp. */
new_lwp->stopped = 1;
/* If we're suspending all threads, leave this one suspended
{
event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
- if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
+ if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
{
reinsert_single_step_breakpoints (event_thr);
internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
-/* Return the PC as read from the regcache of LWP, without any
- adjustment. */
-
-static CORE_ADDR
-get_pc (struct lwp_info *lwp)
+CORE_ADDR
+linux_process_target::get_pc (lwp_info *lwp)
{
struct thread_info *saved_thread;
struct regcache *regcache;
CORE_ADDR pc;
- if (the_low_target.get_pc == NULL)
+ if (!low_supports_breakpoints ())
return 0;
saved_thread = current_thread;
current_thread = get_lwp_thread (lwp);
regcache = get_thread_regcache (current_thread, 1);
- pc = (*the_low_target.get_pc) (regcache);
+ pc = low_get_pc (regcache);
if (debug_threads)
debug_printf ("pc is 0x%lx\n", (long) pc);
current_thread = saved_thread;
}
-static int check_stopped_by_watchpoint (struct lwp_info *child);
-
-/* Called when the LWP stopped for a signal/trap. If it stopped for a
- trap check what caused it (breakpoint, watchpoint, trace, etc.),
- and save the result in the LWP's stop_reason field. If it stopped
- for a breakpoint, decrement the PC if necessary on the lwp's
- architecture. Returns true if we now have the LWP's stop PC. */
-
-static int
-save_stop_reason (struct lwp_info *lwp)
+bool
+linux_process_target::save_stop_reason (lwp_info *lwp)
{
CORE_ADDR pc;
CORE_ADDR sw_breakpoint_pc;
siginfo_t siginfo;
#endif
- if (the_low_target.get_pc == NULL)
- return 0;
+ if (!low_supports_breakpoints ())
+ return false;
pc = get_pc (lwp);
- sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
+ sw_breakpoint_pc = pc - low_decr_pc_after_break ();
/* breakpoint_at reads from the current thread. */
saved_thread = current_thread;
then the user inserts a breakpoint inside the range. In that
case we need to report the breakpoint PC. */
if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
- && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
+ && low_breakpoint_at (sw_breakpoint_pc))
lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
if (hardware_breakpoint_inserted_here (pc))
{
struct regcache *regcache
= get_thread_regcache (current_thread, 1);
- (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
+ low_set_pc (regcache, sw_breakpoint_pc);
}
/* Update this so we record the correct stop PC below. */
lwp->stop_pc = pc;
current_thread = saved_thread;
- return 1;
+ return true;
}
static struct lwp_info *
return 0;
}
-/* Return 1 if this lwp still has an interesting status pending. If
- not (e.g., it had stopped for a breakpoint that is gone), return
- false. */
-
-static int
-thread_still_has_status_pending_p (struct thread_info *thread)
+bool
+linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
struct lwp_info *lp = get_thread_lwp (thread);
#if !USE_SIGTRAP_SIGINFO
else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
- && !(*the_low_target.breakpoint_at) (pc))
+ && !low_breakpoint_at (pc))
{
if (debug_threads)
debug_printf ("previous SW breakpoint of %ld gone\n",
return 0;
}
-/* Return true if this lwp has an interesting status pending. */
-static bool
-status_pending_p_callback (thread_info *thread, ptid_t ptid)
+bool
+linux_process_target::status_pending_p_callback (thread_info *thread,
+ ptid_t ptid)
{
struct lwp_info *lp = get_thread_lwp (thread);
return 0;
if (lp->status_pending_p
- && !thread_still_has_status_pending_p (thread))
+ && !thread_still_has_status_pending (thread))
{
- linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
+ resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
return 0;
}
}
regcache = get_thread_regcache (current_thread, 1);
- (*the_low_target.set_pc) (regcache, status.tpoint_addr);
+ low_set_pc (regcache, status.tpoint_addr);
lwp->stop_pc = status.tpoint_addr;
/* Cancel any fast tracepoint lock this thread was
return 0;
}
-/* Fetch the possibly triggered data watchpoint info and store it in
- CHILD.
-
- On some archs, like x86, that use debug registers to set
- watchpoints, it's possible that the way to know which watched
- address trapped, is to check the register that is used to select
- which address to watch. Problem is, between setting the watchpoint
- and reading back which data address trapped, the user may change
- the set of watchpoints, and, as a consequence, GDB changes the
- debug registers in the inferior. To avoid reading back a stale
- stopped-data-address when that happens, we cache in LP the fact
- that a watchpoint trapped, and the corresponding data address, as
- soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
- registers meanwhile, we have the cached data we can rely on. */
-
-static int
-check_stopped_by_watchpoint (struct lwp_info *child)
+bool
+linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
{
- if (the_low_target.stopped_by_watchpoint != NULL)
- {
- struct thread_info *saved_thread;
+ struct thread_info *saved_thread = current_thread;
+ current_thread = get_lwp_thread (child);
- saved_thread = current_thread;
- current_thread = get_lwp_thread (child);
+ if (low_stopped_by_watchpoint ())
+ {
+ child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
+ child->stopped_data_address = low_stopped_data_address ();
+ }
- if (the_low_target.stopped_by_watchpoint ())
- {
- child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
+ current_thread = saved_thread;
- if (the_low_target.stopped_data_address != NULL)
- child->stopped_data_address
- = the_low_target.stopped_data_address ();
- else
- child->stopped_data_address = 0;
- }
+ return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
+}
- current_thread = saved_thread;
- }
+bool
+linux_process_target::low_stopped_by_watchpoint ()
+{
+ return false;
+}
- return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
+CORE_ADDR
+linux_process_target::low_stopped_data_address ()
+{
+ return 0;
}
/* Return the ptrace options that we want to try to enable. */
child->stepping ? "step" : "continue",
target_pid_to_str (ptid_of (thread)));
- linux_resume_one_lwp (child, child->stepping, 0, NULL);
+ resume_one_lwp (child, child->stepping, 0, NULL);
return NULL;
}
}
}
}
-/* Resume LWPs that are currently stopped without any pending status
- to report, but are resumed from the core's perspective. */
-
-static void
-resume_stopped_resumed_lwps (thread_info *thread)
+void
+linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
struct lwp_info *lp = get_thread_lwp (thread);
paddress (lp->stop_pc),
step);
- linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
+ resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
}
}
&requested_child->status_pending);
requested_child->status_pending_p = 0;
requested_child->status_pending = 0;
- linux_resume_one_lwp (requested_child, 0, 0, NULL);
+ resume_one_lwp (requested_child, 0, 0, NULL);
}
if (requested_child->suspended
/* Now that we've pulled all events out of the kernel, resume
LWPs that don't have an interesting event to report. */
if (stopping_threads == NOT_STOPPING_THREADS)
- for_each_thread (resume_stopped_resumed_lwps);
+ for_each_thread ([this] (thread_info *thread)
+ {
+ resume_stopped_resumed_lwps (thread);
+ });
/* ... and find an LWP with a status to report to the core, if
any. */
= get_thread_regcache (current_thread, 1);
event_child->stop_pc += increment_pc;
- (*the_low_target.set_pc) (regcache, event_child->stop_pc);
+ low_set_pc (regcache, event_child->stop_pc);
- if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
+ if (!low_breakpoint_at (event_child->stop_pc))
event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}
}
not support internal breakpoints at all, we also report the
SIGTRAP without further processing; it's of no concern to us. */
maybe_internal_trap
- = (supports_breakpoints ()
+ = (low_supports_breakpoints ()
&& (WSTOPSIG (w) == SIGTRAP
|| ((WSTOPSIG (w) == SIGILL
|| WSTOPSIG (w) == SIGSEGV)
- && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
+ && low_breakpoint_at (event_child->stop_pc))));
if (maybe_internal_trap)
{
debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
WSTOPSIG (w), lwpid_of (current_thread));
- linux_resume_one_lwp (event_child, 0, 0, NULL);
+ resume_one_lwp (event_child, 0, 0, NULL);
if (debug_threads)
debug_exit ();
lwpid_of (current_thread));
}
- linux_resume_one_lwp (event_child, event_child->stepping,
- 0, NULL);
+ resume_one_lwp (event_child, event_child->stepping, 0, NULL);
if (debug_threads)
debug_exit ();
}
else
{
- linux_resume_one_lwp (event_child, event_child->stepping,
- WSTOPSIG (w), info_p);
+ resume_one_lwp (event_child, event_child->stepping,
+ WSTOPSIG (w), info_p);
}
if (debug_threads)
decr_pc_after_break adjustment to the inferior's regcache
ourselves. */
- if (the_low_target.set_pc != NULL)
+ if (low_supports_breakpoints ())
{
struct regcache *regcache
= get_thread_regcache (current_thread, 1);
- (*the_low_target.set_pc) (regcache, event_child->stop_pc);
+ low_set_pc (regcache, event_child->stop_pc);
}
if (step_over_finished)
/* Remove the single-step breakpoints if any. Note that
there isn't single-step breakpoint if we finished stepping
over. */
- if (can_software_single_step ()
+ if (supports_software_single_step ()
&& has_single_step_breakpoints (current_thread))
{
stop_all_lwps (0, event_child);
/* Alright, we're going to report a stop. */
/* Remove single-step breakpoints. */
- if (can_software_single_step ())
+ if (supports_software_single_step ())
{
/* Remove single-step breakpoints or not. It it is true, stop all
lwps, so that other threads won't hit the breakpoint in the
if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
&& !cs.swbreak_feature)
{
- int decr_pc = the_low_target.decr_pc_after_break;
+ int decr_pc = low_decr_pc_after_break ();
if (decr_pc != 0)
{
struct regcache *regcache
= get_thread_regcache (current_thread, 1);
- (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
+ low_set_pc (regcache, event_child->stop_pc + decr_pc);
}
}
WSTOPSIG (*wstat), lwpid_of (thread));
}
- linux_resume_one_lwp (lwp, 0, 0, NULL);
+ resume_one_lwp (lwp, 0, 0, NULL);
}
else
lwp_suspended_inc (lwp);
lwp->pending_signals = p_sig;
}
-/* Install breakpoints for software single stepping. */
-
-static void
-install_software_single_step_breakpoints (struct lwp_info *lwp)
+void
+linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
struct thread_info *thread = get_lwp_thread (lwp);
struct regcache *regcache = get_thread_regcache (thread, 1);
scoped_restore save_current_thread = make_scoped_restore (&current_thread);
current_thread = thread;
- std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
+ std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
for (CORE_ADDR pc : next_pcs)
set_single_step_breakpoint (pc, current_ptid);
}
-/* Single step via hardware or software single step.
- Return 1 if hardware single stepping, 0 if software single stepping
- or can't single step. */
-
-static int
-single_step (struct lwp_info* lwp)
+int
+linux_process_target::single_step (lwp_info* lwp)
{
int step = 0;
{
step = 1;
}
- else if (can_software_single_step ())
+ else if (supports_software_single_step ())
{
install_software_single_step_breakpoints (lwp);
step = 0;
== fast_tpoint_collect_result::not_collecting);
}
-/* Resume execution of LWP. If STEP is nonzero, single-step it. If
- SIGNAL is nonzero, give it that signal. */
-
-static void
-linux_resume_one_lwp_throw (struct lwp_info *lwp,
- int step, int signal, siginfo_t *info)
+void
+linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
+ int signal, siginfo_t *info)
{
struct thread_info *thread = get_lwp_thread (lwp);
struct thread_info *saved_thread;
step = single_step (lwp);
}
- if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
+ if (proc->tdesc != NULL && low_supports_breakpoints ())
{
struct regcache *regcache = get_thread_regcache (current_thread, 1);
- lwp->stop_pc = (*the_low_target.get_pc) (regcache);
+ lwp->stop_pc = low_get_pc (regcache);
if (debug_threads)
{
return 0;
}
-/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
- disappears while we try to resume it. */
-
-static void
-linux_resume_one_lwp (struct lwp_info *lwp,
- int step, int signal, siginfo_t *info)
+void
+linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
+ siginfo_t *info)
{
try
{
- linux_resume_one_lwp_throw (lwp, step, signal, info);
+ resume_one_lwp_throw (lwp, step, signal, info);
}
catch (const gdb_exception_error &ex)
{
lwp->resume = NULL;
}
-/* find_thread callback for linux_resume. Return true if this lwp has an
- interesting status pending. */
-
-static bool
-resume_status_pending_p (thread_info *thread)
+bool
+linux_process_target::resume_status_pending (thread_info *thread)
{
struct lwp_info *lwp = get_thread_lwp (thread);
if (lwp->resume == NULL)
return false;
- return thread_still_has_status_pending_p (thread);
+ return thread_still_has_status_pending (thread);
}
-/* Return 1 if this lwp that GDB wants running is stopped at an
- internal breakpoint that we need to step over. It assumes that any
- required STOP_PC adjustment has already been propagated to the
- inferior's regcache. */
-
-static bool
-need_step_over_p (thread_info *thread)
+bool
+linux_process_target::thread_needs_step_over (thread_info *thread)
{
struct lwp_info *lwp = get_thread_lwp (thread);
struct thread_info *saved_thread;
/* On software single step target, resume the inferior with signal
rather than stepping over. */
- if (can_software_single_step ()
+ if (supports_software_single_step ()
&& lwp->pending_signals != NULL
&& lwp_signal_can_be_delivered (lwp))
{
current_thread = saved_thread;
- linux_resume_one_lwp (lwp, step, 0, NULL);
+ resume_one_lwp (lwp, step, 0, NULL);
/* Require next event from this LWP. */
step_over_bkpt = thread->id;
}
}
-/* This function is called once per thread. We check the thread's resume
- request, which will tell us whether to resume, step, or leave the thread
- stopped; and what signal, if any, it should be sent.
-
- For threads which we aren't explicitly told otherwise, we preserve
- the stepping flag; this is used for stepping over gdbserver-placed
- breakpoints.
-
- If pending_flags was set in any thread, we queue any needed
- signals, since we won't actually resume. We already have a pending
- event to report, so we don't need to preserve any step requests;
- they should be re-issued if necessary. */
-
-static void
-linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
+void
+linux_process_target::resume_one_thread (thread_info *thread,
+ bool leave_all_stopped)
{
struct lwp_info *lwp = get_thread_lwp (thread);
int leave_pending;
before considering to start a step-over (in all-stop). */
bool any_pending = false;
if (!non_stop)
- any_pending = find_thread (resume_status_pending_p) != NULL;
+ any_pending = find_thread ([this] (thread_info *thread)
+ {
+ return resume_status_pending (thread);
+ }) != nullptr;
/* If there is a thread which would otherwise be resumed, which is
stopped at a breakpoint that needs stepping over, then don't
other threads stopped, then resume all threads again. Make sure
to queue any signals that would otherwise be delivered or
queued. */
- if (!any_pending && supports_breakpoints ())
- need_step_over = find_thread (need_step_over_p);
+ if (!any_pending && low_supports_breakpoints ())
+ need_step_over = find_thread ([this] (thread_info *thread)
+ {
+ return thread_needs_step_over (thread);
+ });
bool leave_all_stopped = (need_step_over != NULL || any_pending);
otherwise deliver. */
for_each_thread ([&] (thread_info *thread)
{
- linux_resume_one_thread (thread, leave_all_stopped);
+ resume_one_thread (thread, leave_all_stopped);
});
if (need_step_over)
async_file_mark ();
}
-/* This function is called once per thread. We check the thread's
- last resume request, which will tell us whether to resume, step, or
- leave the thread stopped. Any signal the client requested to be
- delivered has already been enqueued at this point.
-
- If any thread that GDB wants running is stopped at an internal
- breakpoint that needs stepping over, we start a step-over operation
- on that particular thread, and leave all others stopped. */
-
-static void
-proceed_one_lwp (thread_info *thread, lwp_info *except)
+void
+linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
{
struct lwp_info *lwp = get_thread_lwp (thread);
int step;
/* If resume_step is requested by GDB, install single-step
breakpoints when the thread is about to be actually resumed if
the single-step breakpoints weren't removed. */
- if (can_software_single_step ()
+ if (supports_software_single_step ()
&& !has_single_step_breakpoints (thread))
install_software_single_step_breakpoints (lwp);
else
step = 0;
- linux_resume_one_lwp (lwp, step, 0, NULL);
+ resume_one_lwp (lwp, step, 0, NULL);
}
-static void
-unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
+void
+linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
+ lwp_info *except)
{
struct lwp_info *lwp = get_thread_lwp (thread);
resume any threads - have it step over the breakpoint with all
other threads stopped, then resume all threads again. */
- if (supports_breakpoints ())
+ if (low_supports_breakpoints ())
{
- need_step_over = find_thread (need_step_over_p);
+ need_step_over = find_thread ([this] (thread_info *thread)
+ {
+ return thread_needs_step_over (thread);
+ });
if (need_step_over != NULL)
{
if (debug_threads)
debug_printf ("Proceeding, no step-over needed\n");
- for_each_thread ([] (thread_info *thread)
+ for_each_thread ([this] (thread_info *thread)
{
proceed_one_lwp (thread, NULL);
});
/* At this point, ESRCH should mean the process is
already gone, in which case we simply ignore attempts
to change its registers. See also the related
- comment in linux_resume_one_lwp. */
+ comment in resume_one_lwp. */
free (buf);
return 0;
}
}
}
- if (the_low_target.supply_ptrace_register)
- the_low_target.supply_ptrace_register (regcache, regno, buf);
- else
- supply_register (regcache, regno, buf);
+ low_supply_ptrace_register (regcache, regno, buf);
}
void
buf = (char *) alloca (size);
memset (buf, 0, size);
- if (the_low_target.collect_ptrace_register)
- the_low_target.collect_ptrace_register (regcache, regno, buf);
- else
- collect_register (regcache, regno, buf);
+ low_collect_ptrace_register (regcache, regno, buf);
pid = lwpid_of (current_thread);
for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
/* At this point, ESRCH should mean the process is
already gone, in which case we simply ignore attempts
to change its registers. See also the related
- comment in linux_resume_one_lwp. */
+ comment in resume_one_lwp. */
if (errno == ESRCH)
return;
}
#endif /* HAVE_LINUX_USRREGS */
+void
+linux_process_target::low_collect_ptrace_register (regcache *regcache,
+ int regno, char *buf)
+{
+ collect_register (regcache, regno, buf);
+}
+
+void
+linux_process_target::low_supply_ptrace_register (regcache *regcache,
+ int regno, const char *buf)
+{
+ supply_register (regcache, regno, buf);
+}
+
void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
regcache *regcache,
return n;
}
-/* These breakpoint and watchpoint related wrapper functions simply
- pass on the function call if the target has registered a
- corresponding function. */
-
-bool
-linux_process_target::supports_z_point_type (char z_type)
-{
- return (the_low_target.supports_z_point_type != NULL
- && the_low_target.supports_z_point_type (z_type));
-}
-
int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
int size, raw_breakpoint *bp)
{
if (type == raw_bkpt_type_sw)
return insert_memory_breakpoint (bp);
- else if (the_low_target.insert_point != NULL)
- return the_low_target.insert_point (type, addr, size, bp);
else
- /* Unsupported (see target.h). */
- return 1;
+ return low_insert_point (type, addr, size, bp);
+}
+
+int
+linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
+ int size, raw_breakpoint *bp)
+{
+ /* Unsupported (see target.h). */
+ return 1;
}
int
{
if (type == raw_bkpt_type_sw)
return remove_memory_breakpoint (bp);
- else if (the_low_target.remove_point != NULL)
- return the_low_target.remove_point (type, addr, size, bp);
else
- /* Unsupported (see target.h). */
- return 1;
+ return low_remove_point (type, addr, size, bp);
+}
+
+int
+linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
+ int size, raw_breakpoint *bp)
+{
+ /* Unsupported (see target.h). */
+ return 1;
}
/* Implement the stopped_by_sw_breakpoint target_ops
return can_hardware_single_step ();
}
-bool
-linux_process_target::supports_software_single_step ()
-{
- return can_software_single_step ();
-}
-
bool
linux_process_target::stopped_by_watchpoint ()
{
bool
linux_process_target::supports_range_stepping ()
{
- if (can_software_single_step ())
+ if (supports_software_single_step ())
return true;
if (*the_low_target.supports_range_stepping == NULL)
return false;
CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
- if (the_low_target.get_pc == NULL)
+ if (!low_supports_breakpoints ())
return 0;
- return (*the_low_target.get_pc) (regcache);
+ return low_get_pc (regcache);
}
void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
- gdb_assert (the_low_target.set_pc != NULL);
+ gdb_assert (low_supports_breakpoints ());
- (*the_low_target.set_pc) (regcache, pc);
+ low_set_pc (regcache, pc);
}
bool
return ptid_of (current_thread);
}
-/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
-
-int
-linux_process_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
-{
- if (the_low_target.breakpoint_kind_from_pc != NULL)
- return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
- else
- return process_stratum_target::breakpoint_kind_from_pc (pcptr);
-}
-
-/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
-
-const gdb_byte *
-linux_process_target::sw_breakpoint_from_kind (int kind, int *size)
-{
- gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
-
- return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
-}
-
-/* Implementation of the target_ops method
- "breakpoint_kind_from_current_state". */
-
-int
-linux_process_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
-{
- if (the_low_target.breakpoint_kind_from_current_state != NULL)
- return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
- else
- return breakpoint_kind_from_pc (pcptr);
-}
-
const char *
linux_process_target::thread_name (ptid_t thread)
{