lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
+ lwp->thread = add_thread (ptid, lwp);
+
if (the_low_target.new_thread != NULL)
the_low_target.new_thread (lwp);
- lwp->thread = add_thread (ptid, lwp);
-
return lwp;
}
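The only change in this hunk is ordering: lwp->thread is now filled in before the low target's new_thread hook runs, presumably so the hook can rely on the back-pointer. A minimal sketch of that dependency, using hypothetical Lwp/Thread stand-ins rather than gdbserver's real types:

    #include <cassert>
    #include <memory>
    #include <vector>

    struct Lwp;
    struct Thread { Lwp *lwp; };

    struct Lwp
    {
      Thread *thread = nullptr;   /* Back-pointer, filled in by add_thread.  */
    };

    static std::vector<std::unique_ptr<Thread>> all_threads;

    static Thread *
    add_thread (Lwp *lwp)
    {
      all_threads.push_back (std::unique_ptr<Thread> (new Thread {lwp}));
      return all_threads.back ().get ();
    }

    /* Stand-in for the_low_target.new_thread: a hook that may need
       lwp->thread, so it must run only after add_thread.  */
    static void
    new_thread_hook (Lwp *lwp)
    {
      assert (lwp->thread != nullptr);
    }

    int
    main ()
    {
      Lwp lwp;
      lwp.thread = add_thread (&lwp);   /* Register first...  */
      new_thread_hook (&lwp);           /* ...then run the hook.  */
    }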
{
bool seen_one = false;
- thread_info *thread = find_thread (pid, [&] (thread_info *thread)
+ thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
{
if (!seen_one)
{
}
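The rename matters because the lambda's parameter was shadowing the very `thread` variable the result is assigned to; inside the body, `thread` could only ever mean the argument. A self-contained illustration of the hazard (standalone toy type, not gdbserver's thread_info):

    #include <cstdio>

    struct thread_info { int id; };

    int
    main ()
    {
      thread_info outer {1};
      thread_info *thread = &outer;

      /* Parameter named like the enclosing variable: the body can no
         longer refer to the outer "thread" at all.  */
      auto shadowed = [&] (thread_info *thread) { return thread->id; };

      /* Renamed parameter: both are visible, intent is unambiguous.  */
      auto renamed = [&] (thread_info *thr_arg)
        { return thr_arg->id + thread->id; };

      thread_info candidate {41};
      std::printf ("%d\n", renamed (&candidate));   /* prints 42 */
      (void) shadowed;
    }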
static int
-linux_kill (int pid)
+linux_kill (process_info *process)
{
- struct process_info *process;
- struct lwp_info *lwp;
-
- process = find_process_pid (pid);
- if (process == NULL)
- return -1;
+ int pid = process->pid;
/* If we're killing a running inferior, make sure it is stopped
first, as PTRACE_KILL will not work otherwise. */
/* See the comment in linux_kill_one_lwp. We did not kill the first
thread in the list, so do so now. */
- lwp = find_lwp_pid (ptid_t (pid));
+ lwp_info *lwp = find_lwp_pid (ptid_t (pid));
if (lwp == NULL)
{
}
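Taking process_info * instead of an int pid moves the lookup and its can't-find-it error path out of the kill method and into the caller, so the method body starts from a process known to exist. A hypothetical sketch of the same refactor shape (toy process_info and registry, not gdbserver's):

    #include <cstdio>
    #include <map>

    struct process_info { int pid; };

    static std::map<int, process_info> processes = { {100, {100}} };

    /* Caller-side lookup: done once, failure handled here.  */
    static process_info *
    find_process_pid (int pid)
    {
      auto it = processes.find (pid);
      return it == processes.end () ? nullptr : &it->second;
    }

    /* After the refactor the method takes the validated object...  */
    static int
    kill_process (process_info *process)
    {
      int pid = process->pid;   /* ...and derives the pid locally.  */
      std::printf ("killing pid %d\n", pid);
      return 0;
    }

    int
    main ()
    {
      process_info *p = find_process_pid (100);
      return p != nullptr ? kill_process (p) : -1;
    }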
static int
-linux_detach (int pid)
+linux_detach (process_info *process)
{
- struct process_info *process;
struct lwp_info *main_lwp;
- process = find_process_pid (pid);
- if (process == NULL)
- return -1;
-
/* As there's a step over already in progress, let it finish first,
otherwise nesting a stabilize_threads operation on top gets real
messy. */
/* Detach from the clone lwps first. If the thread group exits just
while we're detaching, we must reap the clone lwps before we're
able to reap the leader. */
- for_each_thread (pid, linux_detach_lwp_callback);
+ for_each_thread (process->pid, linux_detach_lwp_callback);
- main_lwp = find_lwp_pid (ptid_t (pid));
+ main_lwp = find_lwp_pid (ptid_t (process->pid));
linux_detach_one_lwp (main_lwp);
the_target->mourn (process);
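The comment carries the real constraint: if the thread group exits mid-detach, the clone lwps must be reaped before the leader can be, so the children are detached first and the leader last. A toy loop with the same ordering (simplified stand-ins, not the real detach path):

    #include <cstdio>
    #include <vector>

    struct lwp { int id; bool is_leader; };

    static void
    detach_one (const lwp &l)
    {
      std::printf ("detach %d%s\n", l.id, l.is_leader ? " (leader)" : "");
    }

    int
    main ()
    {
      std::vector<lwp> group = { {100, true}, {101, false}, {102, false} };

      /* Clone lwps first...  */
      for (const lwp &l : group)
        if (!l.is_leader)
          detach_one (l);

      /* ...and the leader only once the others are gone, mirroring the
         reap-children-before-leader constraint described above.  */
      for (const lwp &l : group)
        if (l.is_leader)
          detach_one (l);
    }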
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
- thread_info *thread = find_thread ([&] (thread_info *thread)
+ thread_info *thread = find_thread ([&] (thread_info *thr_arg)
{
int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
- return thread->id.lwp () == lwp;
+ return thr_arg->id.lwp () == lwp;
});
if (thread == NULL)
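One detail worth spelling out in the predicate above: when the incoming ptid has no lwp component, it compares against the pid instead, which matches the thread-group leader, since a leader's lwpid equals its pid. A standalone sketch of that rule (toy ptid, not gdb's):

    #include <cassert>

    /* Minimal stand-in for gdbserver's ptid_t: pid plus lwp.  */
    struct ptid
    {
      int pid_v;
      long lwp_v;
      int pid () const { return pid_v; }
      long lwp () const { return lwp_v; }
    };

    /* The id to compare a candidate thread against: the explicit lwp
       if present, otherwise the pid (the leader's lwpid == pid).  */
    static long
    wanted_lwp (const ptid &p)
    {
      return p.lwp () != 0 ? p.lwp () : p.pid ();
    }

    int
    main ()
    {
      assert (wanted_lwp (ptid {100, 0}) == 100);    /* pid-only ptid */
      assert (wanted_lwp (ptid {100, 102}) == 102);  /* explicit lwp */
    }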
iterate_over_lwps_ftype callback,
void *data)
{
- thread_info *thread = find_thread (filter, [&] (thread_info *thread)
+ thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
{
- lwp_info *lwp = get_thread_lwp (thread);
+ lwp_info *lwp = get_thread_lwp (thr_arg);
return callback (lwp, data);
});
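This wrapper is the usual shim from a C-style callback-plus-void-*data pair to a capturing lambda for the C++ traversal, so legacy callers keep working unchanged. A self-contained version of the same adapter (hypothetical find_lwp traversal standing in for find_thread):

    #include <cstdio>
    #include <vector>

    struct lwp_info { int lwpid; };

    /* Old-style callback type: object plus opaque user data.  */
    typedef int (*iterate_over_lwps_ftype) (lwp_info *lwp, void *data);

    static std::vector<lwp_info> lwps = { {101}, {102}, {103} };

    /* New-style traversal: takes any predicate, returns first match.  */
    template<typename Pred>
    static lwp_info *
    find_lwp (Pred pred)
    {
      for (lwp_info &l : lwps)
        if (pred (&l))
          return &l;
      return nullptr;
    }

    /* The adapter: wrap the legacy pair in a capturing lambda.  */
    static lwp_info *
    iterate_over_lwps (iterate_over_lwps_ftype callback, void *data)
    {
      return find_lwp ([&] (lwp_info *l) { return callback (l, data) != 0; });
    }

    static int
    match_lwpid (lwp_info *l, void *data)
    {
      return l->lwpid == *(int *) data;
    }

    int
    main ()
    {
      int wanted = 102;
      lwp_info *l = iterate_over_lwps (match_lwpid, &wanted);
      std::printf ("%d\n", l != nullptr ? l->lwpid : -1);  /* 102 */
    }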
/* Check for a lwp with a pending status. */
- if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
+ if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
{
event_thread = find_thread_in_random ([&] (thread_info *thread)
{
if (debug_threads && event_thread)
debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
}
- else if (!ptid_equal (filter_ptid, null_ptid))
+ else if (filter_ptid != null_ptid)
{
requested_child = find_lwp_pid (filter_ptid);
else
any_resumed = 0;
- if (ptid_equal (step_over_bkpt, null_ptid))
+ if (step_over_bkpt == null_ptid)
pid = linux_wait_for_event (ptid, &w, options);
else
{
if it's not the single_step_breakpoint we are hitting.
This avoids a program trapping on a permanent
breakpoint forever. */
- if (!ptid_equal (step_over_bkpt, null_ptid)
+ if (step_over_bkpt != null_ptid
&& event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
&& (event_child->stepping
|| !single_step_breakpoint_inserted_here (event_child->stop_pc)))
from among those that have had events. Giving equal priority
to all LWPs that have had events helps prevent
starvation. */
- if (ptid_equal (ptid, minus_one_ptid))
+ if (ptid == minus_one_ptid)
{
event_child->status_pending_p = 1;
event_child->status_pending = w;
ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
}
- gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
+ gdb_assert (step_over_bkpt == null_ptid);
if (debug_threads)
{
event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
}
while ((target_options & TARGET_WNOHANG) == 0
- && ptid_equal (event_ptid, null_ptid)
+ && event_ptid == null_ptid
&& ourstatus->kind == TARGET_WAITKIND_IGNORE);
/* If at least one stop was reported, there may be more. A single
SIGCHLD can signal more than one child stop. */
if (target_is_async_p ()
&& (target_options & TARGET_WNOHANG) != 0
- && !ptid_equal (event_ptid, null_ptid))
+ && event_ptid != null_ptid)
async_file_mark ();
return event_ptid;
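All of the ptid_equal (a, b) and ptid_is_pid (p) call sites in this stretch become a == b and p.is_pid (), which presumes ptid_t is now a class carrying those members. A minimal model of such an interface (illustrative only; gdb's real ptid_t also excludes the sentinel values from is_pid):

    #include <cassert>

    class ptid
    {
    public:
      constexpr ptid (int pid, long lwp = 0, long tid = 0)
        : m_pid (pid), m_lwp (lwp), m_tid (tid) {}

      int pid () const { return m_pid; }
      long lwp () const { return m_lwp; }

      /* True if this names a whole process rather than one thread.  */
      bool is_pid () const { return m_lwp == 0 && m_tid == 0; }

      bool operator== (const ptid &other) const
      {
        return (m_pid == other.m_pid
                && m_lwp == other.m_lwp
                && m_tid == other.m_tid);
      }
      bool operator!= (const ptid &other) const { return !(*this == other); }

    private:
      int m_pid;
      long m_lwp, m_tid;
    };

    /* Sentinels analogous to null_ptid and minus_one_ptid.  */
    constexpr ptid null_ptid (0);
    constexpr ptid minus_one_ptid (-1);

    int
    main ()
    {
      ptid step_over_bkpt = null_ptid;
      assert (step_over_bkpt == null_ptid);        /* was ptid_equal ()  */
      assert (ptid (100).is_pid ());               /* was ptid_is_pid () */
      assert (ptid (100, 102) != minus_one_ptid);  /* was !ptid_equal () */
    }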
for (int ndx = 0; ndx < n; ndx++)
{
ptid_t ptid = resume[ndx].thread;
- if (ptid_equal (ptid, minus_one_ptid)
+ if (ptid == minus_one_ptid
|| ptid == thread->id
/* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
of PID'. */
|| (ptid.pid () == pid_of (thread)
- && (ptid_is_pid (ptid)
+ && (ptid.is_pid ()
|| ptid.lwp () == -1)))
{
if (resume[ndx].kind == resume_stop
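The condition above encodes three match modes: minus_one_ptid means every thread, an exact ptid means one thread, and both 'pPID' and 'pPID.-1' mean all threads of PID. The same predicate in standalone form (toy ptid again, not gdb's):

    #include <cassert>

    struct ptid
    {
      int pid_v;
      long lwp_v;
      int pid () const { return pid_v; }
      long lwp () const { return lwp_v; }
      bool is_pid () const { return lwp_v == 0; }
      bool operator== (const ptid &o) const
      { return pid_v == o.pid_v && lwp_v == o.lwp_v; }
    };

    const ptid minus_one_ptid = { -1, 0 };

    /* Does a resume request for REQ apply to thread THREAD?  */
    static bool
    resume_applies_p (const ptid &req, const ptid &thread)
    {
      return (req == minus_one_ptid          /* all threads everywhere */
              || req == thread               /* exactly this thread */
              || (req.pid () == thread.pid ()
                  && (req.is_pid ()          /* 'pPID': all of PID */
                      || req.lwp () == -1))); /* 'pPID.-1': likewise */
    }

    int
    main ()
    {
      ptid t = {100, 102};
      assert (resume_applies_p (minus_one_ptid, t));
      assert (resume_applies_p (ptid {100, 102}, t));
      assert (resume_applies_p (ptid {100, 0}, t));   /* pPID */
      assert (resume_applies_p (ptid {100, -1}, t));  /* pPID.-1 */
      assert (!resume_applies_p (ptid {200, 0}, t));
    }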
static void
complete_ongoing_step_over (void)
{
- if (!ptid_equal (step_over_bkpt, null_ptid))
+ if (step_over_bkpt != null_ptid)
{
struct lwp_info *lwp;
int wstat;
{
const char *sep;
CORE_ADDR *addrp;
- int len;
+ int name_len;
sep = strchr (annex, '=');
if (sep == NULL)
break;
- len = sep - annex;
- if (len == 5 && startswith (annex, "start"))
+ name_len = sep - annex;
+ if (name_len == 5 && startswith (annex, "start"))
addrp = &lm_addr;
- else if (len == 4 && startswith (annex, "prev"))
+ else if (name_len == 4 && startswith (annex, "prev"))
addrp = &lm_prev;
else
{
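name_len is the better name because the count covers only the key before '=' in an annex of the form "start=ADDR;prev=ADDR", not the whole key=value pair. A standalone walk over such an annex (local startswith mirroring gdb's helper):

    #include <cstdio>
    #include <cstring>

    /* Like gdb's startswith: does STR begin with PREFIX?  */
    static bool
    startswith (const char *str, const char *prefix)
    {
      return std::strncmp (str, prefix, std::strlen (prefix)) == 0;
    }

    int
    main ()
    {
      const char *annex = "start=0x1000;prev=0x0";

      while (*annex != '\0')
        {
          const char *sep = std::strchr (annex, '=');
          if (sep == nullptr)
            break;

          /* Length of the key alone -- this is what the patch renames
             from "len" to "name_len".  */
          int name_len = sep - annex;

          if (name_len == 5 && startswith (annex, "start"))
            std::printf ("start value begins at: %s\n", sep + 1);
          else if (name_len == 4 && startswith (annex, "prev"))
            std::printf ("prev value begins at: %s\n", sep + 1);

          /* Advance past this key=value pair.  */
          const char *next = std::strchr (sep, ';');
          if (next == nullptr)
            break;
          annex = next + 1;
        }
    }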
set_target_ops (&linux_target_ops);
linux_ptrace_init_warnings ();
+ linux_proc_init_warnings ();
sigchld_action.sa_handler = sigchld_handler;
sigemptyset (&sigchld_action.sa_mask);
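This is the standard sigaction setup: point sa_handler at the handler, clear the blocked-signal mask, install it for SIGCHLD. A runnable version in isolation (the handler body is a placeholder; gdbserver's real one marks its async event pipe so the main loop wakes up):

    #include <cstdio>
    #include <signal.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Placeholder handler: only async-signal-safe work belongs here.  */
    static void
    sigchld_handler (int signo)
    {
      (void) signo;
    }

    int
    main ()
    {
      struct sigaction sigchld_action = {};

      sigchld_action.sa_handler = sigchld_handler;
      sigemptyset (&sigchld_action.sa_mask);  /* block nothing extra */
      sigchld_action.sa_flags = SA_RESTART;   /* restart slow syscalls */
      if (sigaction (SIGCHLD, &sigchld_action, nullptr) != 0)
        {
          std::perror ("sigaction");
          return 1;
        }

      /* Fork a child that exits at once, then wait: the handler fires
         when the child dies and waitpid reaps it.  */
      pid_t pid = fork ();
      if (pid == 0)
        _exit (0);
      waitpid (pid, nullptr, 0);
      std::puts ("child reaped");
    }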