/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
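
/* Illustrative sketch only (not part of this file's logic): these
   offsets are meant to be read with PTRACE_PEEKUSER.  Fetching the
   inferior's text segment start on such a target looks roughly like:

     errno = 0;
     text = ptrace (PTRACE_PEEKUSER, pid,
		    (PTRACE_TYPE_ARG3) PT_TEXT_ADDR, 0);
     if (errno == 0)
       ... TEXT is the load address of the text segment ...  */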

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
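
/* A minimal sketch of how these auxv structures are walked, assuming
   BUF/LEN hold a block read from /proc/PID/auxv (the real consumers
   live in gdbserver's auxv reading code, not here):

     Elf64_auxv_t *auxv;

     for (auxv = (Elf64_auxv_t *) buf;
	  (char *) auxv < buf + len && auxv->a_type != AT_NULL;
	  auxv++)
       if (auxv->a_type == AT_PHDR)
	 ... auxv->a_un.a_val holds the program header table address ...  */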

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
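
/* Illustrative pairing of the two helpers above (a sketch, not code
   from this file): the waitpid path stashes a stop it cannot yet
   attribute to a known LWP, and handle_extended_wait later claims it:

     add_to_pid_list (&stopped_pids, new_pid, status);
     ...
     int status;

     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... NEW_PID's initial stop was already collected ...  */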

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
				    siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
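
/* Sketch of how the event pipe is typically used (the usual
   self-pipe pattern; the real async_file_mark is declared further
   down and defined beyond this excerpt, so this is an assumption,
   not a quote):

     static void
     async_file_mark (void)
     {
       int ret;

       do
	 ret = write (linux_event_pipe[1], "+", 1);
       while (ret < 0 && errno == EINTR);
     }
*/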

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if it is not an ELF file at all.  On
   success, fill *MACHINE with the ELF machine; otherwise set it to
   EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file or too short to hold an ELF header, and -1 if the file is not
   accessible, doesn't exist, or is not ELF.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
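
/* Usage sketch (hypothetical caller, not from this file): deciding
   whether the inferior needs a 64-bit register layout:

     unsigned int machine;
     int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

     if (is_elf64 > 0)
       ... use the 64-bit layout; MACHINE holds e_machine,
	   e.g. EM_X86_64 ...  */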

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 1 so as
   not to report the trap to higher layers); fork and vfork events
   are reported to GDB (return 0).  If we see an exec event, we will
   modify ORIG_EVENT_LWP to point to a new LWP representing the new
   program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = XNEW (struct target_desc);
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      VEC (int) *syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = proc->syscalls_to_catch;
      proc->syscalls_to_catch = NULL;

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = syscalls_to_catch;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall number trapped, and *SYSRET with the
   syscall's return code.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number and -ENOSYS return value.  */
      *sysno = UNKNOWN_SYSCALL;
      *sysret = -ENOSYS;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);

  if (debug_threads)
    {
      debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
		    *sysno, *sysret);
    }

  current_thread = saved_thread;
}
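
/* For illustration only: on x86-64, a low target's
   get_syscall_trapinfo callback would read the syscall number and
   return value from the regcache, roughly as below (register names
   assumed; the real implementations live in the per-architecture
   linux-*-low.c files):

     static void
     x86_64_get_syscall_trapinfo (struct regcache *regcache,
				  int *sysno, int *sysret)
     {
       long long l_sysno, l_sysret;

       collect_register_by_name (regcache, "orig_rax", &l_sysno);
       collect_register_by_name (regcache, "rax", &l_sysret);
       *sysno = (int) l_sysno;
       *sysret = (int) l_sysret;
     }
*/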

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
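
/* A concrete instance of the PC adjustment above, on i386 where
   decr_pc_after_break is 1: with a one-byte int3 planted at
   0x08048000, the trap reports $eip == 0x08048001, so
   sw_breakpoint_pc == 0x08048000, and set_pc rewinds the thread to
   the breakpoint address before the stop is reported.  */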

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver
   to the thread when detaching; otherwise it would be
   suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If there's a step over already in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */

static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */

static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
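
/* Usage sketch (hypothetical callback, not from this file): count
   the stopped LWPs of process PID.  Since iterate_over_lwps stops as
   soon as the callback returns nonzero, a pure visitor returns 0:

     static int
     count_stopped_cb (struct lwp_info *lwp, void *data)
     {
       if (lwp->stopped)
	 ++*(int *) data;
       return 0;
     }

     int n = 0;
     iterate_over_lwps (pid_to_ptid (pid), count_stopped_cb, &n);
*/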
1808
1809/* Detect zombie thread group leaders, and "exit" them. We can't reap
1810 their exits until all other threads in the group have exited. */
1811
1812static void
1813check_zombie_leaders (void)
1814{
1815 struct process_info *proc, *tmp;
1816
1817 ALL_PROCESSES (proc, tmp)
1818 {
1819 pid_t leader_pid = pid_of (proc);
1820 struct lwp_info *leader_lp;
1821
1822 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1823
1824 if (debug_threads)
1825 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1826 "num_lwps=%d, zombie=%d\n",
1827 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1828 linux_proc_pid_is_zombie (leader_pid));
1829
1830 if (leader_lp != NULL && !leader_lp->stopped
1831 /* Check if there are other threads in the group, as we may
1832 have raced with the inferior simply exiting. */
1833 && !last_thread_of_process_p (leader_pid)
1834 && linux_proc_pid_is_zombie (leader_pid))
1835 {
1836 /* A leader zombie can mean one of two things:
1837
1838 - It exited, and there's an exit status pending
1839 available, or only the leader exited (not the whole
1840 program). In the latter case, we can't waitpid the
1841 leader's exit status until all other threads are gone.
1842
1843 - There are 3 or more threads in the group, and a thread
1844 other than the leader exec'd. On an exec, the Linux
1845 kernel destroys all other threads (except the execing
1846 one) in the thread group, and resets the execing thread's
1847 tid to the tgid. No exit notification is sent for the
1848 execing thread -- from the ptracer's perspective, it
1849 appears as though the execing thread just vanishes.
1850 Until we reap all other threads except the leader and the
1851 execing thread, the leader will be zombie, and the
1852 execing thread will be in `D (disc sleep)'. As soon as
1853 all other threads are reaped, the execing thread changes
1854 it's tid to the tgid, and the previous (zombie) leader
1855 vanishes, giving place to the "new" leader. We could try
1856 distinguishing the exit and exec cases, by waiting once
1857 more, and seeing if something comes out, but it doesn't
1858 sound useful. The previous leader _does_ go away, and
1859 we'll re-add the new one once we see the exec event
1860 (which is just the same as what would happen if the
1861 previous leader did exit voluntarily before some other
1862 thread execs). */
1863
1864 if (debug_threads)
1865 fprintf (stderr,
1866 "CZL: Thread group leader %d zombie "
1867 "(it exited, or another thread execd).\n",
1868 leader_pid);
1869
1870 delete_lwp (leader_lp);
1871 }
1872 }
1873}
1874
1875/* Callback for `find_inferior'. Returns the first LWP that is not
1876 stopped. ARG is a PTID filter. */
1877
1878static int
1879not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1880{
1881 struct thread_info *thr = (struct thread_info *) entry;
1882 struct lwp_info *lwp;
1883 ptid_t filter = *(ptid_t *) arg;
1884
1885 if (!ptid_match (ptid_of (thr), filter))
1886 return 0;
1887
1888 lwp = get_thread_lwp (thr);
1889 if (!lwp->stopped)
1890 return 1;
1891
1892 return 0;
1893}
1894
1895/* Increment LWP's suspend count. */
1896
1897static void
1898lwp_suspended_inc (struct lwp_info *lwp)
1899{
1900 lwp->suspended++;
1901
1902 if (debug_threads && lwp->suspended > 4)
1903 {
1904 struct thread_info *thread = get_lwp_thread (lwp);
1905
1906 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1907 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1908 }
1909}
1910
1911/* Decrement LWP's suspend count. */
1912
1913static void
1914lwp_suspended_decr (struct lwp_info *lwp)
1915{
1916 lwp->suspended--;
1917
1918 if (lwp->suspended < 0)
1919 {
1920 struct thread_info *thread = get_lwp_thread (lwp);
1921
1922 internal_error (__FILE__, __LINE__,
1923 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1924 lwp->suspended);
1925 }
1926}
1927
1928/* This function should only be called if the LWP got a SIGTRAP.
1929
1930 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1931 event was handled, 0 otherwise. */
1932
1933static int
1934handle_tracepoints (struct lwp_info *lwp)
1935{
1936 struct thread_info *tinfo = get_lwp_thread (lwp);
1937 int tpoint_related_event = 0;
1938
1939 gdb_assert (lwp->suspended == 0);
1940
1941 /* If this tracepoint hit causes a tracing stop, we'll immediately
1942 uninsert tracepoints. To do this, we temporarily pause all
1943 threads, unpatch away, and then unpause threads. We need to make
1944 sure the unpausing doesn't resume LWP too. */
1945 lwp_suspended_inc (lwp);
1946
1947 /* And we need to be sure that any all-threads-stopping doesn't try
1948 to move threads out of the jump pads, as it could deadlock the
1949 inferior (LWP could be in the jump pad, maybe even holding the
1950 lock). */
1951
1952 /* Do any necessary step collect actions. */
1953 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1954
1955 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1956
1957 /* See if we just hit a tracepoint and do its main collect
1958 actions. */
1959 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1960
1961 lwp_suspended_decr (lwp);
1962
1963 gdb_assert (lwp->suspended == 0);
1964 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1965
1966 if (tpoint_related_event)
1967 {
1968 if (debug_threads)
1969 debug_printf ("got a tracepoint event\n");
1970 return 1;
1971 }
1972
1973 return 0;
1974}
1975
1976/* Convenience wrapper. Returns nonzero if LWP is presently
1977 collecting a fast tracepoint. */
1978
1979static int
1980linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1981 struct fast_tpoint_collect_status *status)
1982{
1983 CORE_ADDR thread_area;
1984 struct thread_info *thread = get_lwp_thread (lwp);
1985
1986 if (the_low_target.get_thread_area == NULL)
1987 return 0;
1988
1989 /* Get the thread area address. This is used to recognize which
1990 thread is which when tracing with the in-process agent library.
1991 We don't read anything from the address, and treat it as opaque;
1992 it's the address itself that we assume is unique per-thread. */
1993 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1994 return 0;
1995
1996 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1997}
1998
1999/* The reason we resume in the caller is that we want to be able
2000 to pass lwp->status_pending as WSTAT, and we need to clear
2001 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2002 refuses to resume. */
2003
2004static int
2005maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2006{
2007 struct thread_info *saved_thread;
2008
2009 saved_thread = current_thread;
2010 current_thread = get_lwp_thread (lwp);
2011
2012 if ((wstat == NULL
2013 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2014 && supports_fast_tracepoints ()
2015 && agent_loaded_p ())
2016 {
2017 struct fast_tpoint_collect_status status;
2018 int r;
2019
2020 if (debug_threads)
2021 debug_printf ("Checking whether LWP %ld needs to move out of the "
2022 "jump pad.\n",
2023 lwpid_of (current_thread));
2024
2025 r = linux_fast_tracepoint_collecting (lwp, &status);
2026
2027 if (wstat == NULL
2028 || (WSTOPSIG (*wstat) != SIGILL
2029 && WSTOPSIG (*wstat) != SIGFPE
2030 && WSTOPSIG (*wstat) != SIGSEGV
2031 && WSTOPSIG (*wstat) != SIGBUS))
2032 {
2033 lwp->collecting_fast_tracepoint = r;
2034
2035 if (r != 0)
2036 {
2037 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2038 {
2039 /* Haven't executed the original instruction yet.
2040 Set breakpoint there, and wait till it's hit,
2041 then single-step until exiting the jump pad. */
2042 lwp->exit_jump_pad_bkpt
2043 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2044 }
2045
2046 if (debug_threads)
2047 debug_printf ("Checking whether LWP %ld needs to move out of "
2048 "the jump pad...it does\n",
2049 lwpid_of (current_thread));
2050 current_thread = saved_thread;
2051
2052 return 1;
2053 }
2054 }
2055 else
2056 {
2057 /* If we get a synchronous signal while collecting, *and*
2058 while executing the (relocated) original instruction,
2059 reset the PC to point at the tpoint address, before
2060 reporting to GDB. Otherwise, it's an IPA lib bug: just
2061 report the signal to GDB, and pray for the best. */
2062
2063 lwp->collecting_fast_tracepoint = 0;
2064
2065 if (r != 0
2066 && (status.adjusted_insn_addr <= lwp->stop_pc
2067 && lwp->stop_pc < status.adjusted_insn_addr_end))
2068 {
2069 siginfo_t info;
2070 struct regcache *regcache;
2071
2072 /* The si_addr on a few signals references the address
2073 of the faulting instruction. Adjust that as
2074 well. */
2075 if ((WSTOPSIG (*wstat) == SIGILL
2076 || WSTOPSIG (*wstat) == SIGFPE
2077 || WSTOPSIG (*wstat) == SIGBUS
2078 || WSTOPSIG (*wstat) == SIGSEGV)
2079 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2080 (PTRACE_TYPE_ARG3) 0, &info) == 0
2081 /* Final check just to make sure we don't clobber
2082 the siginfo of non-kernel-sent signals. */
2083 && (uintptr_t) info.si_addr == lwp->stop_pc)
2084 {
2085 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2086 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2087 (PTRACE_TYPE_ARG3) 0, &info);
2088 }
2089
2090 regcache = get_thread_regcache (current_thread, 1);
2091 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2092 lwp->stop_pc = status.tpoint_addr;
2093
2094 /* Cancel any fast tracepoint lock this thread was
2095 holding. */
2096 force_unlock_trace_buffer ();
2097 }
2098
2099 if (lwp->exit_jump_pad_bkpt != NULL)
2100 {
2101 if (debug_threads)
2102 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2103 "stopping all threads momentarily.\n");
2104
2105 stop_all_lwps (1, lwp);
2106
2107 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2108 lwp->exit_jump_pad_bkpt = NULL;
2109
2110 unstop_all_lwps (1, lwp);
2111
2112 gdb_assert (lwp->suspended >= 0);
2113 }
2114 }
2115 }
2116
2117 if (debug_threads)
2118 debug_printf ("Checking whether LWP %ld needs to move out of the "
2119 "jump pad...no\n",
2120 lwpid_of (current_thread));
2121
2122 current_thread = saved_thread;
2123 return 0;
2124}
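
/* [Editor's illustration] The si_addr rewrite above in isolation: a
   hedged sketch of fetching a stopped tracee's pending siginfo with
   PTRACE_GETSIGINFO, patching si_addr, and writing it back with
   PTRACE_SETSIGINFO.  The function name is hypothetical; excluded
   from the build.  */
#if 0
#include <signal.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
example_adjust_si_addr (pid_t lwpid, uintptr_t new_addr)
{
  siginfo_t info;

  if (ptrace (PTRACE_GETSIGINFO, lwpid, (void *) 0, &info) != 0)
    return -1;			/* Not stopped, or no pending signal.  */

  info.si_addr = (void *) new_addr;
  return ptrace (PTRACE_SETSIGINFO, lwpid, (void *) 0, &info) == 0 ? 0 : -1;
}
#endif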
2125
2126/* Enqueue one signal in the "signals to report later when out of the
2127 jump pad" list. */
2128
2129static void
2130enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2131{
2132 struct pending_signals *p_sig;
2133 struct thread_info *thread = get_lwp_thread (lwp);
2134
2135 if (debug_threads)
2136 debug_printf ("Deferring signal %d for LWP %ld.\n",
2137 WSTOPSIG (*wstat), lwpid_of (thread));
2138
2139 if (debug_threads)
2140 {
2141 struct pending_signals *sig;
2142
2143 for (sig = lwp->pending_signals_to_report;
2144 sig != NULL;
2145 sig = sig->prev)
2146 debug_printf (" Already queued %d\n",
2147 sig->signal);
2148
2149 debug_printf (" (no more currently queued signals)\n");
2150 }
2151
2152 /* Don't enqueue non-RT signals if they are already in the deferred
2153 queue. (SIGSTOP being the easiest signal to see ending up here
2154 twice) */
2155 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2156 {
2157 struct pending_signals *sig;
2158
2159 for (sig = lwp->pending_signals_to_report;
2160 sig != NULL;
2161 sig = sig->prev)
2162 {
2163 if (sig->signal == WSTOPSIG (*wstat))
2164 {
2165 if (debug_threads)
2166 debug_printf ("Not requeuing already queued non-RT signal %d"
2167 " for LWP %ld\n",
2168 sig->signal,
2169 lwpid_of (thread));
2170 return;
2171 }
2172 }
2173 }
2174
2175 p_sig = XCNEW (struct pending_signals);
2176 p_sig->prev = lwp->pending_signals_to_report;
2177 p_sig->signal = WSTOPSIG (*wstat);
2178
2179 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2180 &p_sig->info);
2181
2182 lwp->pending_signals_to_report = p_sig;
2183}
2184
2185/* Dequeue one signal from the "signals to report later when out of
2186 the jump pad" list. */
2187
2188static int
2189dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2190{
2191 struct thread_info *thread = get_lwp_thread (lwp);
2192
2193 if (lwp->pending_signals_to_report != NULL)
2194 {
2195 struct pending_signals **p_sig;
2196
2197 p_sig = &lwp->pending_signals_to_report;
2198 while ((*p_sig)->prev != NULL)
2199 p_sig = &(*p_sig)->prev;
2200
2201 *wstat = W_STOPCODE ((*p_sig)->signal);
2202 if ((*p_sig)->info.si_signo != 0)
2203 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2204 &(*p_sig)->info);
2205 free (*p_sig);
2206 *p_sig = NULL;
2207
2208 if (debug_threads)
2209 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2210 WSTOPSIG (*wstat), lwpid_of (thread));
2211
2212 if (debug_threads)
2213 {
2214 struct pending_signals *sig;
2215
2216 for (sig = lwp->pending_signals_to_report;
2217 sig != NULL;
2218 sig = sig->prev)
2219 debug_printf (" Still queued %d\n",
2220 sig->signal);
2221
2222 debug_printf (" (no more queued signals)\n");
2223 }
2224
2225 return 1;
2226 }
2227
2228 return 0;
2229}
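
/* [Editor's illustration] Why enqueue/dequeue above preserve delivery
   order: new entries are pushed at the head via ->prev, and the
   dequeue walks to the entry whose ->prev is NULL (the oldest), so
   the list behaves as a FIFO.  A self-contained sketch with
   hypothetical names, excluded from the build.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct example_pending { struct example_pending *prev; int signal; };

static void
example_push (struct example_pending **head, int sig)
{
  struct example_pending *p
    = (struct example_pending *) calloc (1, sizeof *p);

  p->prev = *head;
  p->signal = sig;
  *head = p;
}

/* Only call with a non-empty list, as dequeue_one_deferred_signal
   does.  */
static int
example_pop_oldest (struct example_pending **head)
{
  struct example_pending **p = head;
  int sig;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}

int
main (void)
{
  struct example_pending *q = NULL;
  int a, b, c;

  example_push (&q, 10);
  example_push (&q, 11);
  example_push (&q, 12);
  a = example_pop_oldest (&q);
  b = example_pop_oldest (&q);
  c = example_pop_oldest (&q);
  printf ("%d %d %d\n", a, b, c);	/* Prints "10 11 12".  */
  return 0;
}
#endif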
2230
2231/* Fetch the possibly triggered data watchpoint info and store it in
2232 CHILD.
2233
2234 On some archs that use debug registers to set watchpoints, like
2235 x86, the way to know which watched address trapped may be to
2236 check the register that is used to select which address to watch.
2237 The problem is that, between setting the watchpoint
2238 and reading back which data address trapped, the user may change
2239 the set of watchpoints, and, as a consequence, GDB changes the
2240 debug registers in the inferior. To avoid reading back a stale
2241 stopped-data-address when that happens, we cache in CHILD the fact
2242 that a watchpoint trapped, and the corresponding data address, as
2243 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2244 registers meanwhile, we have the cached data we can rely on. */
2245
2246static int
2247check_stopped_by_watchpoint (struct lwp_info *child)
2248{
2249 if (the_low_target.stopped_by_watchpoint != NULL)
2250 {
2251 struct thread_info *saved_thread;
2252
2253 saved_thread = current_thread;
2254 current_thread = get_lwp_thread (child);
2255
2256 if (the_low_target.stopped_by_watchpoint ())
2257 {
2258 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2259
2260 if (the_low_target.stopped_data_address != NULL)
2261 child->stopped_data_address
2262 = the_low_target.stopped_data_address ();
2263 else
2264 child->stopped_data_address = 0;
2265 }
2266
2267 current_thread = saved_thread;
2268 }
2269
2270 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2271}
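
/* [Editor's illustration] The caching idea from the comment above,
   boiled down: snapshot the watchpoint hit and its data address at
   SIGTRAP time, so later queries don't depend on debug registers
   that GDB may have rewritten since.  All names here are
   hypothetical; excluded from the build.  */
#if 0
struct example_cached_stop
{
  int stopped_by_watchpoint;
  unsigned long stopped_data_address;
};

static void
example_cache_stop (struct example_cached_stop *c,
		    int (*hw_hit_p) (void),
		    unsigned long (*hw_data_address) (void))
{
  c->stopped_by_watchpoint = hw_hit_p ();
  c->stopped_data_address
    = c->stopped_by_watchpoint ? hw_data_address () : 0;
}
#endif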
2272
2273/* Return the ptrace options that we want to try to enable. */
2274
2275static int
2276linux_low_ptrace_options (int attached)
2277{
2278 int options = 0;
2279
2280 if (!attached)
2281 options |= PTRACE_O_EXITKILL;
2282
2283 if (report_fork_events)
2284 options |= PTRACE_O_TRACEFORK;
2285
2286 if (report_vfork_events)
2287 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2288
2289 if (report_exec_events)
2290 options |= PTRACE_O_TRACEEXEC;
2291
2292 options |= PTRACE_O_TRACESYSGOOD;
2293
2294 return options;
2295}
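
/* [Editor's illustration] The option mask computed above is applied
   to a ptrace-stopped LWP with PTRACE_SETOPTIONS (gdbserver does this
   via linux_enable_event_reporting).  Minimal sketch, hypothetical
   name, excluded from the build.  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static int
example_apply_ptrace_options (pid_t lwpid, int options)
{
  /* LWPID must be in a ptrace stop for this to succeed.  */
  return ptrace (PTRACE_SETOPTIONS, lwpid, (void *) 0,
		 (void *) (long) options) == 0 ? 0 : -1;
}
#endif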
2296
2297/* Do low-level handling of the event, and check if we should go on
2298 and pass it on to caller code. Return the affected LWP if we
2299 should, or NULL otherwise. */
2300
2301static struct lwp_info *
2302linux_low_filter_event (int lwpid, int wstat)
2303{
2304 struct lwp_info *child;
2305 struct thread_info *thread;
2306 int have_stop_pc = 0;
2307
2308 child = find_lwp_pid (pid_to_ptid (lwpid));
2309
2310 /* Check for stop events reported by a process we didn't already
2311 know about - anything not already in our LWP list.
2312
2313 If we're expecting to receive stopped processes after
2314 fork, vfork, and clone events, then we'll just add the
2315 new one to our list and go back to waiting for the event
2316 to be reported - the stopped process might be returned
2317 from waitpid before or after the event is.
2318
2319 But note the case of a non-leader thread exec'ing after the
2320 leader has exited and gone from our lists (because
2321 check_zombie_leaders deleted it). The non-leader thread
2322 changes its tid to the tgid. */
2323
2324 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2325 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2326 {
2327 ptid_t child_ptid;
2328
2329 /* A multi-thread exec after we had seen the leader exiting. */
2330 if (debug_threads)
2331 {
2332 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2333 "after exec.\n", lwpid);
2334 }
2335
2336 child_ptid = ptid_build (lwpid, lwpid, 0);
2337 child = add_lwp (child_ptid);
2338 child->stopped = 1;
2339 current_thread = child->thread;
2340 }
2341
2342 /* If we didn't find a process, one of two things presumably happened:
2343 - A process we started and then detached from has exited. Ignore it.
2344 - A process we are controlling has forked and the new child's stop
2345 was reported to us by the kernel. Save its PID. */
2346 if (child == NULL && WIFSTOPPED (wstat))
2347 {
2348 add_to_pid_list (&stopped_pids, lwpid, wstat);
2349 return NULL;
2350 }
2351 else if (child == NULL)
2352 return NULL;
2353
2354 thread = get_lwp_thread (child);
2355
2356 child->stopped = 1;
2357
2358 child->last_status = wstat;
2359
2360 /* Check if the thread has exited. */
2361 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2362 {
2363 if (debug_threads)
2364 debug_printf ("LLFE: %d exited.\n", lwpid);
2365 /* If there is at least one more LWP, then the exit signal was
2366 not the end of the debugged application and should be
2367 ignored, unless GDB wants to hear about thread exits. */
2368 if (report_thread_events
2369 || last_thread_of_process_p (pid_of (thread)))
2370 {
2371 /* Events are serialized to the GDB core, and we can't
2372 report this one right now, so leave the status pending for
2373 the next time we're able to report it. */
2374 mark_lwp_dead (child, wstat);
2375 return child;
2376 }
2377 else
2378 {
2379 delete_lwp (child);
2380 return NULL;
2381 }
2382 }
2383
2384 gdb_assert (WIFSTOPPED (wstat));
2385
2386 if (WIFSTOPPED (wstat))
2387 {
2388 struct process_info *proc;
2389
2390 /* Architecture-specific setup after the inferior is running. */
2391 proc = find_process_pid (pid_of (thread));
2392 if (proc->tdesc == NULL)
2393 {
2394 if (proc->attached)
2395 {
2396 /* This needs to happen after we have attached to the
2397 inferior and it is stopped for the first time, but
2398 before we access any inferior registers. */
2399 linux_arch_setup_thread (thread);
2400 }
2401 else
2402 {
2403 /* The process is started, but GDBserver will do
2404 architecture-specific setup after the program stops at
2405 the first instruction. */
2406 child->status_pending_p = 1;
2407 child->status_pending = wstat;
2408 return child;
2409 }
2410 }
2411 }
2412
2413 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2414 {
2415 struct process_info *proc = find_process_pid (pid_of (thread));
2416 int options = linux_low_ptrace_options (proc->attached);
2417
2418 linux_enable_event_reporting (lwpid, options);
2419 child->must_set_ptrace_flags = 0;
2420 }
2421
2422 /* Always update syscall_state, even if it will be filtered later. */
2423 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2424 {
2425 child->syscall_state
2426 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2427 ? TARGET_WAITKIND_SYSCALL_RETURN
2428 : TARGET_WAITKIND_SYSCALL_ENTRY);
2429 }
2430 else
2431 {
2432 /* Almost all other ptrace-stops are known to be outside of system
2433 calls, with further exceptions in handle_extended_wait. */
2434 child->syscall_state = TARGET_WAITKIND_IGNORE;
2435 }
2436
2437 /* Be careful to not overwrite stop_pc until
2438 check_stopped_by_breakpoint is called. */
2439 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2440 && linux_is_extended_waitstatus (wstat))
2441 {
2442 child->stop_pc = get_pc (child);
2443 if (handle_extended_wait (&child, wstat))
2444 {
2445 /* The event has been handled, so just return without
2446 reporting it. */
2447 return NULL;
2448 }
2449 }
2450
2451 /* Check first whether this was a SW/HW breakpoint before checking
2452 watchpoints, because at least s390 can't tell the data address of
2453 hardware watchpoint hits, and returns stopped-by-watchpoint as
2454 long as there's a watchpoint set. */
2455 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2456 {
2457 if (check_stopped_by_breakpoint (child))
2458 have_stop_pc = 1;
2459 }
2460
2461 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2462 or hardware watchpoint. Check which is which if we got
2463 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2464 stepped an instruction that triggered a watchpoint. In that
2465 case, on some architectures (such as x86), instead of
2466 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2467 the debug registers separately. */
2468 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2469 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2470 check_stopped_by_watchpoint (child);
2471
2472 if (!have_stop_pc)
2473 child->stop_pc = get_pc (child);
2474
2475 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2476 && child->stop_expected)
2477 {
2478 if (debug_threads)
2479 debug_printf ("Expected stop.\n");
2480 child->stop_expected = 0;
2481
2482 if (thread->last_resume_kind == resume_stop)
2483 {
2484 /* We want to report the stop to the core. Treat the
2485 SIGSTOP as a normal event. */
2486 if (debug_threads)
2487 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2488 target_pid_to_str (ptid_of (thread)));
2489 }
2490 else if (stopping_threads != NOT_STOPPING_THREADS)
2491 {
2492 /* Stopping threads. We don't want this SIGSTOP to end up
2493 pending. */
2494 if (debug_threads)
2495 debug_printf ("LLW: SIGSTOP caught for %s "
2496 "while stopping threads.\n",
2497 target_pid_to_str (ptid_of (thread)));
2498 return NULL;
2499 }
2500 else
2501 {
2502 /* This is a delayed SIGSTOP. Filter out the event. */
2503 if (debug_threads)
2504 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2505 child->stepping ? "step" : "continue",
2506 target_pid_to_str (ptid_of (thread)));
2507
2508 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2509 return NULL;
2510 }
2511 }
2512
2513 child->status_pending_p = 1;
2514 child->status_pending = wstat;
2515 return child;
2516}
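
/* [Editor's illustration] The wait-status tests the filter above keys
   on, in isolation: a raw status from waitpid is normally one of
   exited / killed-by-signal / stopped.  Hypothetical helper name,
   excluded from the build.  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
example_decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif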
2517
2518/* Resume LWPs that are currently stopped without any pending status
2519 to report, but are resumed from the core's perspective. */
2520
2521static void
2522resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2523{
2524 struct thread_info *thread = (struct thread_info *) entry;
2525 struct lwp_info *lp = get_thread_lwp (thread);
2526
2527 if (lp->stopped
2528 && !lp->suspended
2529 && !lp->status_pending_p
2530 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2531 {
2532 int step = thread->last_resume_kind == resume_step;
2533
2534 if (debug_threads)
2535 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2536 target_pid_to_str (ptid_of (thread)),
2537 paddress (lp->stop_pc),
2538 step);
2539
2540 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2541 }
2542}
2543
2544/* Wait for an event from child(ren) WAIT_PTID, and return any that
2545 match FILTER_PTID (leaving others pending). The PTIDs can be:
2546 minus_one_ptid, to specify any child; a pid PTID, specifying all
2547 lwps of a thread group; or a PTID representing a single lwp. Store
2548 the stop status through the status pointer WSTAT. OPTIONS is
2549 passed to the waitpid call. Return 0 if no event was found and
2550 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2551 was found. Return the PID of the stopped child otherwise. */
2552
2553static int
2554linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2555 int *wstatp, int options)
2556{
2557 struct thread_info *event_thread;
2558 struct lwp_info *event_child, *requested_child;
2559 sigset_t block_mask, prev_mask;
2560
2561 retry:
2562 /* N.B. event_thread points to the thread_info struct that contains
2563 event_child. Keep them in sync. */
2564 event_thread = NULL;
2565 event_child = NULL;
2566 requested_child = NULL;
2567
2568 /* Check for a lwp with a pending status. */
2569
2570 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2571 {
2572 event_thread = (struct thread_info *)
2573 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2574 if (event_thread != NULL)
2575 event_child = get_thread_lwp (event_thread);
2576 if (debug_threads && event_thread)
2577 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2578 }
2579 else if (!ptid_equal (filter_ptid, null_ptid))
2580 {
2581 requested_child = find_lwp_pid (filter_ptid);
2582
2583 if (stopping_threads == NOT_STOPPING_THREADS
2584 && requested_child->status_pending_p
2585 && requested_child->collecting_fast_tracepoint)
2586 {
2587 enqueue_one_deferred_signal (requested_child,
2588 &requested_child->status_pending);
2589 requested_child->status_pending_p = 0;
2590 requested_child->status_pending = 0;
2591 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2592 }
2593
2594 if (requested_child->suspended
2595 && requested_child->status_pending_p)
2596 {
2597 internal_error (__FILE__, __LINE__,
2598 "requesting an event out of a"
2599 " suspended child?");
2600 }
2601
2602 if (requested_child->status_pending_p)
2603 {
2604 event_child = requested_child;
2605 event_thread = get_lwp_thread (event_child);
2606 }
2607 }
2608
2609 if (event_child != NULL)
2610 {
2611 if (debug_threads)
2612 debug_printf ("Got an event from pending child %ld (%04x)\n",
2613 lwpid_of (event_thread), event_child->status_pending);
2614 *wstatp = event_child->status_pending;
2615 event_child->status_pending_p = 0;
2616 event_child->status_pending = 0;
2617 current_thread = event_thread;
2618 return lwpid_of (event_thread);
2619 }
2620
2621 /* But if we don't find a pending event, we'll have to wait.
2622
2623 We only enter this loop if no process has a pending wait status.
2624 Thus any action taken in response to a wait status inside this
2625 loop is responding as soon as we detect the status, not after any
2626 pending events. */
2627
2628 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2629 all signals while here. */
2630 sigfillset (&block_mask);
2631 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2632
2633 /* Always pull all events out of the kernel. We'll randomly select
2634 an event LWP out of all that have events, to prevent
2635 starvation. */
2636 while (event_child == NULL)
2637 {
2638 pid_t ret = 0;
2639
2640 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2641 quirks:
2642
2643 - If the thread group leader exits while other threads in the
2644 thread group still exist, waitpid(TGID, ...) hangs. That
2645 waitpid won't return an exit status until the other threads
2646 in the group are reaped.
2647
2648 - When a non-leader thread execs, that thread just vanishes
2649 without reporting an exit (so we'd hang if we waited for it
2650 explicitly in that case). The exec event is reported to
2651 the TGID pid. */
2652 errno = 0;
2653 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2654
2655 if (debug_threads)
2656 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2657 ret, errno ? strerror (errno) : "ERRNO-OK");
2658
2659 if (ret > 0)
2660 {
2661 if (debug_threads)
2662 {
2663 debug_printf ("LLW: waitpid %ld received %s\n",
2664 (long) ret, status_to_str (*wstatp));
2665 }
2666
2667 /* Filter all events. IOW, leave all events pending. We'll
2668 randomly select an event LWP out of all that have events
2669 below. */
2670 linux_low_filter_event (ret, *wstatp);
2671 /* Retry until nothing comes out of waitpid. A single
2672 SIGCHLD can indicate more than one child stopped. */
2673 continue;
2674 }
2675
2676 /* Now that we've pulled all events out of the kernel, resume
2677 LWPs that don't have an interesting event to report. */
2678 if (stopping_threads == NOT_STOPPING_THREADS)
2679 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2680
2681 /* ... and find an LWP with a status to report to the core, if
2682 any. */
2683 event_thread = (struct thread_info *)
2684 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2685 if (event_thread != NULL)
2686 {
2687 event_child = get_thread_lwp (event_thread);
2688 *wstatp = event_child->status_pending;
2689 event_child->status_pending_p = 0;
2690 event_child->status_pending = 0;
2691 break;
2692 }
2693
2694 /* Check for zombie thread group leaders. Those can't be reaped
2695 until all other threads in the thread group are. */
2696 check_zombie_leaders ();
2697
2698 /* If there are no resumed children left in the set of LWPs we
2699 want to wait for, bail. We can't just block in
2700 waitpid/sigsuspend, because lwps might have been left stopped
2701 in trace-stop state, and we'd be stuck forever waiting for
2702 their status to change (which would only happen if we resumed
2703 them). Even if WNOHANG is set, this return code is preferred
2704 over 0 (below), as it is more detailed. */
2705 if ((find_inferior (&all_threads,
2706 not_stopped_callback,
2707 &wait_ptid) == NULL))
2708 {
2709 if (debug_threads)
2710 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2711 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2712 return -1;
2713 }
2714
2715 /* No interesting event to report to the caller. */
2716 if ((options & WNOHANG))
2717 {
2718 if (debug_threads)
2719 debug_printf ("WNOHANG set, no event found\n");
2720
2721 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2722 return 0;
2723 }
2724
2725 /* Block until we get an event reported with SIGCHLD. */
2726 if (debug_threads)
2727 debug_printf ("sigsuspend'ing\n");
2728
2729 sigsuspend (&prev_mask);
2730 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2731 goto retry;
2732 }
2733
2734 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2735
2736 current_thread = event_thread;
2737
2738 return lwpid_of (event_thread);
2739}
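
/* [Editor's illustration] The drain loop above, reduced to its core:
   call waitpid (-1, ..., WNOHANG) until it stops returning PIDs,
   since one SIGCHLD can stand for several stopped children (gdbserver
   additionally passes __WALL so clone children are visible).  HANDLE
   is a hypothetical callback; excluded from the build.  */
#if 0
#include <sys/types.h>
#include <sys/wait.h>

static void
example_drain_child_events (void (*handle) (pid_t pid, int wstat))
{
  for (;;)
    {
      int wstat;
      pid_t pid = waitpid (-1, &wstat, WNOHANG);

      if (pid <= 0)
	break;			/* 0: nothing ready; -1: no children.  */
      handle (pid, wstat);
    }
}
#endif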
2740
2741/* Wait for an event from child(ren) PTID. PTIDs can be:
2742 minus_one_ptid, to specify any child; a pid PTID, specifying all
2743 lwps of a thread group; or a PTID representing a single lwp. Store
2744 the stop status through the status pointer WSTAT. OPTIONS is
2745 passed to the waitpid call. Return 0 if no event was found and
2746 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2747 was found. Return the PID of the stopped child otherwise. */
2748
2749static int
2750linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2751{
2752 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2753}
2754
2755/* Count the LWPs that have had events. */
2756
2757static int
2758count_events_callback (struct inferior_list_entry *entry, void *data)
2759{
2760 struct thread_info *thread = (struct thread_info *) entry;
2761 struct lwp_info *lp = get_thread_lwp (thread);
2762 int *count = (int *) data;
2763
2764 gdb_assert (count != NULL);
2765
2766 /* Count only resumed LWPs that have an event pending. */
2767 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2768 && lp->status_pending_p)
2769 (*count)++;
2770
2771 return 0;
2772}
2773
2774/* Select the LWP (if any) that is currently being single-stepped. */
2775
2776static int
2777select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2778{
2779 struct thread_info *thread = (struct thread_info *) entry;
2780 struct lwp_info *lp = get_thread_lwp (thread);
2781
2782 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2783 && thread->last_resume_kind == resume_step
2784 && lp->status_pending_p)
2785 return 1;
2786 else
2787 return 0;
2788}
2789
2790/* Select the Nth LWP that has had an event. */
2791
2792static int
2793select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2794{
2795 struct thread_info *thread = (struct thread_info *) entry;
2796 struct lwp_info *lp = get_thread_lwp (thread);
2797 int *selector = (int *) data;
2798
2799 gdb_assert (selector != NULL);
2800
2801 /* Select only resumed LWPs that have an event pending. */
2802 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2803 && lp->status_pending_p)
2804 if ((*selector)-- == 0)
2805 return 1;
2806
2807 return 0;
2808}
2809
2810/* Select one LWP out of those that have events pending. */
2811
2812static void
2813select_event_lwp (struct lwp_info **orig_lp)
2814{
2815 int num_events = 0;
2816 int random_selector;
2817 struct thread_info *event_thread = NULL;
2818
2819 /* In all-stop, give preference to the LWP that is being
2820 single-stepped. There will be at most one, and it's the LWP that
2821 the core is most interested in. If we didn't do this, then we'd
2822 have to handle pending step SIGTRAPs somehow in case the core
2823 later continues the previously-stepped thread, otherwise we'd
2824 report the pending SIGTRAP, and the core, not having stepped the
2825 thread, wouldn't understand what the trap was for, and therefore
2826 would report it to the user as a random signal. */
2827 if (!non_stop)
2828 {
2829 event_thread
2830 = (struct thread_info *) find_inferior (&all_threads,
2831 select_singlestep_lwp_callback,
2832 NULL);
2833 if (event_thread != NULL)
2834 {
2835 if (debug_threads)
2836 debug_printf ("SEL: Select single-step %s\n",
2837 target_pid_to_str (ptid_of (event_thread)));
2838 }
2839 }
2840 if (event_thread == NULL)
2841 {
2842 /* No single-stepping LWP. Select one at random, out of those
2843 which have had events. */
2844
2845 /* First see how many events we have. */
2846 find_inferior (&all_threads, count_events_callback, &num_events);
2847 gdb_assert (num_events > 0);
2848
2849 /* Now randomly pick a LWP out of those that have had
2850 events. */
2851 random_selector = (int)
2852 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2853
2854 if (debug_threads && num_events > 1)
2855 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2856 num_events, random_selector);
2857
2858 event_thread
2859 = (struct thread_info *) find_inferior (&all_threads,
2860 select_event_lwp_callback,
2861 &random_selector);
2862 }
2863
2864 if (event_thread != NULL)
2865 {
2866 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2867
2868 /* Switch the event LWP. */
2869 *orig_lp = event_lp;
2870 }
2871}
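
/* [Editor's illustration] The selection formula used above: scaling
   rand () through double maps it onto [0, num_events) without the
   modulo bias of rand () % num_events.  Hypothetical wrapper name,
   excluded from the build.  */
#if 0
#include <stdlib.h>

static int
example_random_selector (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif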
2872
2873/* Decrement the suspend count of an LWP. */
2874
2875static int
2876unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2877{
2878 struct thread_info *thread = (struct thread_info *) entry;
2879 struct lwp_info *lwp = get_thread_lwp (thread);
2880
2881 /* Ignore EXCEPT. */
2882 if (lwp == except)
2883 return 0;
2884
2885 lwp_suspended_decr (lwp);
2886 return 0;
2887}
2888
2889/* Decrement the suspend count of all LWPs, except EXCEPT, if
2890 non-NULL. */
2891
2892static void
2893unsuspend_all_lwps (struct lwp_info *except)
2894{
2895 find_inferior (&all_threads, unsuspend_one_lwp, except);
2896}
2897
2898static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2899static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2900 void *data);
2901static int lwp_running (struct inferior_list_entry *entry, void *data);
2902static ptid_t linux_wait_1 (ptid_t ptid,
2903 struct target_waitstatus *ourstatus,
2904 int target_options);
2905
2906/* Stabilize threads (move out of jump pads).
2907
2908 If a thread is midway collecting a fast tracepoint, we need to
2909 finish the collection and move it out of the jump pad before
2910 reporting the signal.
2911
2912 This avoids recursion while collecting (when a signal arrives
2913 midway, and the signal handler itself collects), which would trash
2914 the trace buffer. In case the user set a breakpoint in a signal
2915 handler, this avoids the backtrace showing the jump pad, etc.
2916 Most importantly, there are certain things we can't do safely if
2917 threads are stopped in a jump pad (or in one of its callees). For
2918 example:
2919
2920 - starting a new trace run. A thread still collecting the
2921 previous run could trash the trace buffer when resumed. The trace
2922 buffer control structures would have been reset but the thread had
2923 no way to tell. The thread could even be midway through a memcpy to the
2924 buffer, which would mean that when resumed, it would clobber the
2925 trace buffer that had been set for a new run.
2926
2927 - we can't rewrite/reuse the jump pads for new tracepoints
2928 safely. Say you do tstart while a thread is stopped midway through
2929 a collection. When the thread is later resumed, it finishes the
2930 collection, and returns to the jump pad, to execute the original
2931 instruction that was under the tracepoint jump at the time the
2932 older run had been started. If the jump pad had been rewritten
2933 since for something else in the new run, the thread would now
2934 execute the wrong / random instructions. */
2935
2936static void
2937linux_stabilize_threads (void)
2938{
2939 struct thread_info *saved_thread;
2940 struct thread_info *thread_stuck;
2941
2942 thread_stuck
2943 = (struct thread_info *) find_inferior (&all_threads,
2944 stuck_in_jump_pad_callback,
2945 NULL);
2946 if (thread_stuck != NULL)
2947 {
2948 if (debug_threads)
2949 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2950 lwpid_of (thread_stuck));
2951 return;
2952 }
2953
2954 saved_thread = current_thread;
2955
2956 stabilizing_threads = 1;
2957
2958 /* Kick 'em all. */
2959 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2960
2961 /* Loop until all are stopped out of the jump pads. */
2962 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2963 {
2964 struct target_waitstatus ourstatus;
2965 struct lwp_info *lwp;
2966 int wstat;
2967
2968 /* Note that we go through the full wait event loop. While
2969 moving threads out of the jump pad, we need to be able to step
2970 over internal breakpoints and such. */
2971 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2972
2973 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2974 {
2975 lwp = get_thread_lwp (current_thread);
2976
2977 /* Lock it. */
2978 lwp_suspended_inc (lwp);
2979
2980 if (ourstatus.value.sig != GDB_SIGNAL_0
2981 || current_thread->last_resume_kind == resume_stop)
2982 {
2983 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2984 enqueue_one_deferred_signal (lwp, &wstat);
2985 }
2986 }
2987 }
2988
2989 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2990
2991 stabilizing_threads = 0;
2992
2993 current_thread = saved_thread;
2994
2995 if (debug_threads)
2996 {
2997 thread_stuck
2998 = (struct thread_info *) find_inferior (&all_threads,
2999 stuck_in_jump_pad_callback,
3000 NULL);
3001 if (thread_stuck != NULL)
3002 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3003 lwpid_of (thread_stuck));
3004 }
3005}
3006
3007/* Convenience function that is called when the kernel reports an
3008 event that is not passed out to GDB. */
3009
3010static ptid_t
3011ignore_event (struct target_waitstatus *ourstatus)
3012{
3013 /* If we got an event, there may still be others, as a single
3014 SIGCHLD can indicate more than one child stopped. This forces
3015 another target_wait call. */
3016 async_file_mark ();
3017
3018 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3019 return null_ptid;
3020}
3021
3022/* Convenience function that is called when the kernel reports an exit
3023 event. This decides whether to report the event to GDB as a
3024 process exit event, a thread exit event, or to suppress the
3025 event. */
3026
3027static ptid_t
3028filter_exit_event (struct lwp_info *event_child,
3029 struct target_waitstatus *ourstatus)
3030{
3031 struct thread_info *thread = get_lwp_thread (event_child);
3032 ptid_t ptid = ptid_of (thread);
3033
3034 if (!last_thread_of_process_p (pid_of (thread)))
3035 {
3036 if (report_thread_events)
3037 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3038 else
3039 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3040
3041 delete_lwp (event_child);
3042 }
3043 return ptid;
3044}
3045
3046/* Returns 1 if GDB is interested in any event_child syscalls. */
3047
3048static int
3049gdb_catching_syscalls_p (struct lwp_info *event_child)
3050{
3051 struct thread_info *thread = get_lwp_thread (event_child);
3052 struct process_info *proc = get_thread_process (thread);
3053
3054 return !VEC_empty (int, proc->syscalls_to_catch);
3055}
3056
3057/* Returns 1 if GDB is interested in the event_child syscall.
3058 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3059
3060static int
3061gdb_catch_this_syscall_p (struct lwp_info *event_child)
3062{
3063 int i, iter;
3064 int sysno, sysret;
3065 struct thread_info *thread = get_lwp_thread (event_child);
3066 struct process_info *proc = get_thread_process (thread);
3067
3068 if (VEC_empty (int, proc->syscalls_to_catch))
3069 return 0;
3070
3071 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3072 return 1;
3073
3074 get_syscall_trapinfo (event_child, &sysno, &sysret);
3075 for (i = 0;
3076 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3077 i++)
3078 if (iter == sysno)
3079 return 1;
3080
3081 return 0;
3082}
3083
3084/* Wait for a process event, returning its status in OURSTATUS. */
3085
3086static ptid_t
3087linux_wait_1 (ptid_t ptid,
3088 struct target_waitstatus *ourstatus, int target_options)
3089{
3090 int w;
3091 struct lwp_info *event_child;
3092 int options;
3093 int pid;
3094 int step_over_finished;
3095 int bp_explains_trap;
3096 int maybe_internal_trap;
3097 int report_to_gdb;
3098 int trace_event;
3099 int in_step_range;
3100 int any_resumed;
3101
3102 if (debug_threads)
3103 {
3104 debug_enter ();
3105 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3106 }
3107
3108 /* Translate generic target options into linux options. */
3109 options = __WALL;
3110 if (target_options & TARGET_WNOHANG)
3111 options |= WNOHANG;
3112
3113 bp_explains_trap = 0;
3114 trace_event = 0;
3115 in_step_range = 0;
3116 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3117
3118 /* Find a resumed LWP, if any. */
3119 if (find_inferior (&all_threads,
3120 status_pending_p_callback,
3121 &minus_one_ptid) != NULL)
3122 any_resumed = 1;
3123 else if ((find_inferior (&all_threads,
3124 not_stopped_callback,
3125 &minus_one_ptid) != NULL))
3126 any_resumed = 1;
3127 else
3128 any_resumed = 0;
3129
3130 if (ptid_equal (step_over_bkpt, null_ptid))
3131 pid = linux_wait_for_event (ptid, &w, options);
3132 else
3133 {
3134 if (debug_threads)
3135 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3136 target_pid_to_str (step_over_bkpt));
3137 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3138 }
3139
3140 if (pid == 0 || (pid == -1 && !any_resumed))
3141 {
3142 gdb_assert (target_options & TARGET_WNOHANG);
3143
3144 if (debug_threads)
3145 {
3146 debug_printf ("linux_wait_1 ret = null_ptid, "
3147 "TARGET_WAITKIND_IGNORE\n");
3148 debug_exit ();
3149 }
3150
3151 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3152 return null_ptid;
3153 }
3154 else if (pid == -1)
3155 {
3156 if (debug_threads)
3157 {
3158 debug_printf ("linux_wait_1 ret = null_ptid, "
3159 "TARGET_WAITKIND_NO_RESUMED\n");
3160 debug_exit ();
3161 }
3162
3163 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3164 return null_ptid;
3165 }
3166
3167 event_child = get_thread_lwp (current_thread);
3168
3169 /* linux_wait_for_event only returns an exit status for the last
3170 child of a process. Report it. */
3171 if (WIFEXITED (w) || WIFSIGNALED (w))
3172 {
3173 if (WIFEXITED (w))
3174 {
3175 ourstatus->kind = TARGET_WAITKIND_EXITED;
3176 ourstatus->value.integer = WEXITSTATUS (w);
3177
3178 if (debug_threads)
3179 {
3180 debug_printf ("linux_wait_1 ret = %s, exited with "
3181 "retcode %d\n",
3182 target_pid_to_str (ptid_of (current_thread)),
3183 WEXITSTATUS (w));
3184 debug_exit ();
3185 }
3186 }
3187 else
3188 {
3189 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3190 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3191
3192 if (debug_threads)
3193 {
3194 debug_printf ("linux_wait_1 ret = %s, terminated with "
3195 "signal %d\n",
3196 target_pid_to_str (ptid_of (current_thread)),
3197 WTERMSIG (w));
3198 debug_exit ();
3199 }
3200 }
3201
3202 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3203 return filter_exit_event (event_child, ourstatus);
3204
3205 return ptid_of (current_thread);
3206 }
3207
3208 /* If a step-over executes a breakpoint instruction, then in the
3209 case of a hardware single step it means a gdb/gdbserver breakpoint
3210 had been planted on top of a permanent breakpoint, while in the
3211 case of a software single step it may just mean that gdbserver hit
3212 the reinsert breakpoint. The PC has been adjusted by
3213 check_stopped_by_breakpoint to point at the breakpoint address.
3214 So, for a hardware single step, advance the PC manually past the
3215 breakpoint, and for a software single step, advance it only if it
3216 is not the reinsert breakpoint we are hitting. This prevents the
3217 program from re-trapping a permanent breakpoint
3218 forever. */
3219 if (!ptid_equal (step_over_bkpt, null_ptid)
3220 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3221 && (event_child->stepping
3222 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3223 {
3224 int increment_pc = 0;
3225 int breakpoint_kind = 0;
3226 CORE_ADDR stop_pc = event_child->stop_pc;
3227
3228 breakpoint_kind =
3229 the_target->breakpoint_kind_from_current_state (&stop_pc);
3230 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3231
3232 if (debug_threads)
3233 {
3234 debug_printf ("step-over for %s executed software breakpoint\n",
3235 target_pid_to_str (ptid_of (current_thread)));
3236 }
3237
3238 if (increment_pc != 0)
3239 {
3240 struct regcache *regcache
3241 = get_thread_regcache (current_thread, 1);
3242
3243 event_child->stop_pc += increment_pc;
3244 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3245
3246 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3247 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3248 }
3249 }
3250
3251 /* If this event was not handled before, and is not a SIGTRAP, we
3252 report it. SIGILL and SIGSEGV are also treated as traps in case
3253 a breakpoint is inserted at the current PC. If this target does
3254 not support internal breakpoints at all, we also report the
3255 SIGTRAP without further processing; it's of no concern to us. */
3256 maybe_internal_trap
3257 = (supports_breakpoints ()
3258 && (WSTOPSIG (w) == SIGTRAP
3259 || ((WSTOPSIG (w) == SIGILL
3260 || WSTOPSIG (w) == SIGSEGV)
3261 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3262
3263 if (maybe_internal_trap)
3264 {
3265 /* Handle anything that requires bookkeeping before deciding to
3266 report the event or continue waiting. */
3267
3268 /* First check if we can explain the SIGTRAP with an internal
3269 breakpoint, or if we should possibly report the event to GDB.
3270 Do this before anything that may remove or insert a
3271 breakpoint. */
3272 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3273
3274 /* We have a SIGTRAP, possibly a step-over dance has just
3275 finished. If so, tweak the state machine accordingly,
3276 reinsert breakpoints and delete any reinsert (software
3277 single-step) breakpoints. */
3278 step_over_finished = finish_step_over (event_child);
3279
3280 /* Now invoke the callbacks of any internal breakpoints there. */
3281 check_breakpoints (event_child->stop_pc);
3282
3283 /* Handle tracepoint data collecting. This may overflow the
3284 trace buffer, and cause a tracing stop, removing
3285 breakpoints. */
3286 trace_event = handle_tracepoints (event_child);
3287
3288 if (bp_explains_trap)
3289 {
3290 /* If we stepped or ran into an internal breakpoint, we've
3291 already handled it. So next time we resume (from this
3292 PC), we should step over it. */
3293 if (debug_threads)
3294 debug_printf ("Hit a gdbserver breakpoint.\n");
3295
3296 if (breakpoint_here (event_child->stop_pc))
3297 event_child->need_step_over = 1;
3298 }
3299 }
3300 else
3301 {
3302 /* We have some other signal, possibly a step-over dance was in
3303 progress, and it should be cancelled too. */
3304 step_over_finished = finish_step_over (event_child);
3305 }
3306
3307 /* We have all the data we need. Either report the event to GDB, or
3308 resume threads and keep waiting for more. */
3309
3310 /* If we're collecting a fast tracepoint, finish the collection and
3311 move out of the jump pad before delivering a signal. See
3312 linux_stabilize_threads. */
3313
3314 if (WIFSTOPPED (w)
3315 && WSTOPSIG (w) != SIGTRAP
3316 && supports_fast_tracepoints ()
3317 && agent_loaded_p ())
3318 {
3319 if (debug_threads)
3320 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3321 "to defer or adjust it.\n",
3322 WSTOPSIG (w), lwpid_of (current_thread));
3323
3324 /* Allow debugging the jump pad itself. */
3325 if (current_thread->last_resume_kind != resume_step
3326 && maybe_move_out_of_jump_pad (event_child, &w))
3327 {
3328 enqueue_one_deferred_signal (event_child, &w);
3329
3330 if (debug_threads)
3331 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3332 WSTOPSIG (w), lwpid_of (current_thread));
3333
3334 linux_resume_one_lwp (event_child, 0, 0, NULL);
3335
3336 return ignore_event (ourstatus);
3337 }
3338 }
3339
3340 if (event_child->collecting_fast_tracepoint)
3341 {
3342 if (debug_threads)
3343 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3344 "Check if we're already there.\n",
3345 lwpid_of (current_thread),
3346 event_child->collecting_fast_tracepoint);
3347
3348 trace_event = 1;
3349
3350 event_child->collecting_fast_tracepoint
3351 = linux_fast_tracepoint_collecting (event_child, NULL);
3352
3353 if (event_child->collecting_fast_tracepoint != 1)
3354 {
3355 /* No longer need this breakpoint. */
3356 if (event_child->exit_jump_pad_bkpt != NULL)
3357 {
3358 if (debug_threads)
3359 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3360 "Stopping all threads momentarily.\n");
3361
3362 /* Other running threads could hit this breakpoint.
3363 We don't handle moribund locations like GDB does,
3364 instead we always pause all threads when removing
3365 breakpoints, so that any step-over or
3366 decr_pc_after_break adjustment is always taken
3367 care of while the breakpoint is still
3368 inserted. */
3369 stop_all_lwps (1, event_child);
3370
3371 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3372 event_child->exit_jump_pad_bkpt = NULL;
3373
3374 unstop_all_lwps (1, event_child);
3375
3376 gdb_assert (event_child->suspended >= 0);
3377 }
3378 }
3379
3380 if (event_child->collecting_fast_tracepoint == 0)
3381 {
3382 if (debug_threads)
3383 debug_printf ("fast tracepoint finished "
3384 "collecting successfully.\n");
3385
3386 /* We may have a deferred signal to report. */
3387 if (dequeue_one_deferred_signal (event_child, &w))
3388 {
3389 if (debug_threads)
3390 debug_printf ("dequeued one signal.\n");
3391 }
3392 else
3393 {
3394 if (debug_threads)
3395 debug_printf ("no deferred signals.\n");
3396
3397 if (stabilizing_threads)
3398 {
3399 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3400 ourstatus->value.sig = GDB_SIGNAL_0;
3401
3402 if (debug_threads)
3403 {
3404 debug_printf ("linux_wait_1 ret = %s, stopped "
3405 "while stabilizing threads\n",
3406 target_pid_to_str (ptid_of (current_thread)));
3407 debug_exit ();
3408 }
3409
3410 return ptid_of (current_thread);
3411 }
3412 }
3413 }
3414 }
3415
3416 /* Check whether GDB would be interested in this event. */
3417
3418 /* Check if GDB is interested in this syscall. */
3419 if (WIFSTOPPED (w)
3420 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3421 && !gdb_catch_this_syscall_p (event_child))
3422 {
3423 if (debug_threads)
3424 {
3425 debug_printf ("Ignored syscall for LWP %ld.\n",
3426 lwpid_of (current_thread));
3427 }
3428
3429 linux_resume_one_lwp (event_child, event_child->stepping,
3430 0, NULL);
3431 return ignore_event (ourstatus);
3432 }
3433
3434 /* If GDB is not interested in this signal, don't stop other
3435 threads, and don't report it to GDB. Just resume the inferior
3436 right away. We do this for threading-related signals as well as
3437 any that GDB specifically requested we ignore. But never ignore
3438 SIGSTOP if we sent it ourselves, and do not ignore signals when
3439 stepping - they may require special handling to skip the signal
3440 handler. Also never ignore signals that could be caused by a
3441 breakpoint. */
3442 if (WIFSTOPPED (w)
3443 && current_thread->last_resume_kind != resume_step
3444 && (
3445#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3446 (current_process ()->priv->thread_db != NULL
3447 && (WSTOPSIG (w) == __SIGRTMIN
3448 || WSTOPSIG (w) == __SIGRTMIN + 1))
3449 ||
3450#endif
3451 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3452 && !(WSTOPSIG (w) == SIGSTOP
3453 && current_thread->last_resume_kind == resume_stop)
3454 && !linux_wstatus_maybe_breakpoint (w))))
3455 {
3456 siginfo_t info, *info_p;
3457
3458 if (debug_threads)
3459 debug_printf ("Ignored signal %d for LWP %ld.\n",
3460 WSTOPSIG (w), lwpid_of (current_thread));
3461
3462 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3463 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3464 info_p = &info;
3465 else
3466 info_p = NULL;
3467
3468 if (step_over_finished)
3469 {
3470 /* We cancelled this thread's step-over above. We still
3471 need to unsuspend all other LWPs, and set them back
3472 running again while the signal handler runs. */
3473 unsuspend_all_lwps (event_child);
3474
3475 /* Enqueue the pending signal info so that proceed_all_lwps
3476 doesn't lose it. */
3477 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3478
3479 proceed_all_lwps ();
3480 }
3481 else
3482 {
3483 linux_resume_one_lwp (event_child, event_child->stepping,
3484 WSTOPSIG (w), info_p);
3485 }
3486 return ignore_event (ourstatus);
3487 }
3488
3489 /* Note that all addresses are always "out of the step range" when
3490 there's no range to begin with. */
3491 in_step_range = lwp_in_step_range (event_child);
3492
3493 /* If GDB wanted this thread to single step, and the thread is out
3494 of the step range, we always want to report the SIGTRAP, and let
3495 GDB handle it. Watchpoints should always be reported. So should
3496 signals we can't explain. A SIGTRAP we can't explain could be a
3497 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3498 do, we'd be able to handle GDB breakpoints on top of internal
3499 breakpoints, by handling the internal breakpoint and still
3500 reporting the event to GDB. If we don't, we're out of luck, GDB
3501 won't see the breakpoint hit. If we see a single-step event but
3502 the thread should be continuing, don't pass the trap to gdb.
3503 That indicates that we had previously finished a single-step but
3504 left the single-step pending -- see
3505 complete_ongoing_step_over. */
3506 report_to_gdb = (!maybe_internal_trap
3507 || (current_thread->last_resume_kind == resume_step
3508 && !in_step_range)
3509 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3510 || (!in_step_range
3511 && !bp_explains_trap
3512 && !trace_event
3513 && !step_over_finished
3514 && !(current_thread->last_resume_kind == resume_continue
3515 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3516 || (gdb_breakpoint_here (event_child->stop_pc)
3517 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3518 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3519 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3520
3521 run_breakpoint_commands (event_child->stop_pc);
3522
3523 /* We found no reason GDB would want us to stop. We either hit one
3524 of our own breakpoints, or finished an internal step GDB
3525 shouldn't know about. */
3526 if (!report_to_gdb)
3527 {
3528 if (debug_threads)
3529 {
3530 if (bp_explains_trap)
3531 debug_printf ("Hit a gdbserver breakpoint.\n");
3532 if (step_over_finished)
3533 debug_printf ("Step-over finished.\n");
3534 if (trace_event)
3535 debug_printf ("Tracepoint event.\n");
3536 if (lwp_in_step_range (event_child))
3537 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3538 paddress (event_child->stop_pc),
3539 paddress (event_child->step_range_start),
3540 paddress (event_child->step_range_end));
3541 }
3542
3543 /* We're not reporting this breakpoint to GDB, so apply the
3544 decr_pc_after_break adjustment to the inferior's regcache
3545 ourselves. */
3546
3547 if (the_low_target.set_pc != NULL)
3548 {
3549 struct regcache *regcache
3550 = get_thread_regcache (current_thread, 1);
3551 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3552 }
3553
3554 /* We may have finished stepping over a breakpoint. If so,
3555 we've stopped and suspended all LWPs momentarily except the
3556 stepping one. This is where we resume them all again. We're
3557 going to keep waiting, so use proceed, which handles stepping
3558 over the next breakpoint. */
3559 if (debug_threads)
3560 debug_printf ("proceeding all threads.\n");
3561
3562 if (step_over_finished)
3563 unsuspend_all_lwps (event_child);
3564
3565 proceed_all_lwps ();
3566 return ignore_event (ourstatus);
3567 }
3568
3569 if (debug_threads)
3570 {
3571 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3572 {
3573 char *str;
3574
3575 str = target_waitstatus_to_string (&event_child->waitstatus);
3576 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3577 lwpid_of (get_lwp_thread (event_child)), str);
3578 xfree (str);
3579 }
3580 if (current_thread->last_resume_kind == resume_step)
3581 {
3582 if (event_child->step_range_start == event_child->step_range_end)
3583 debug_printf ("GDB wanted to single-step, reporting event.\n");
3584 else if (!lwp_in_step_range (event_child))
3585 debug_printf ("Out of step range, reporting event.\n");
3586 }
3587 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3588 debug_printf ("Stopped by watchpoint.\n");
3589 else if (gdb_breakpoint_here (event_child->stop_pc))
3590 debug_printf ("Stopped by GDB breakpoint.\n");
3591 debug_printf ("Hit a non-gdbserver trap event.\n");
3593 }
3594
3595 /* Alright, we're going to report a stop. */
3596
3597 if (!stabilizing_threads)
3598 {
3599 /* In all-stop, stop all threads. */
3600 if (!non_stop)
3601 stop_all_lwps (0, NULL);
3602
3603 /* If we're not waiting for a specific LWP, choose an event LWP
3604 from among those that have had events. Giving equal priority
3605 to all LWPs that have had events helps prevent
3606 starvation. */
3607 if (ptid_equal (ptid, minus_one_ptid))
3608 {
3609 event_child->status_pending_p = 1;
3610 event_child->status_pending = w;
3611
3612 select_event_lwp (&event_child);
3613
3614 /* current_thread and event_child must stay in sync. */
3615 current_thread = get_lwp_thread (event_child);
3616
3617 event_child->status_pending_p = 0;
3618 w = event_child->status_pending;
3619 }
3620
3621 if (step_over_finished)
3622 {
3623 if (!non_stop)
3624 {
3625 /* If we were doing a step-over, all other threads but
3626 the stepping one had been paused in start_step_over,
3627 with their suspend counts incremented. We don't want
3628 to do a full unstop/unpause, because we're in
3629 all-stop mode (so we want threads stopped), but we
3630 still need to unsuspend the other threads, to
3631 decrement their `suspended' count back. */
3632 unsuspend_all_lwps (event_child);
3633 }
3634 else
3635 {
3636 /* If we just finished a step-over, then all threads had
3637 been momentarily paused. In all-stop, that's fine,
3638 we want threads stopped by now anyway. In non-stop,
3639 we need to re-resume threads that GDB wanted to be
3640 running. */
3641 unstop_all_lwps (1, event_child);
3642 }
3643 }
3644
3645 /* Stabilize threads (move out of jump pads). */
3646 if (!non_stop)
3647 stabilize_threads ();
3648 }
3649 else
3650 {
3651 /* If we just finished a step-over, then all threads had been
3652 momentarily paused. In all-stop, that's fine, we want
3653 threads stopped by now anyway. In non-stop, we need to
3654 re-resume threads that GDB wanted to be running. */
3655 if (step_over_finished)
3656 unstop_all_lwps (1, event_child);
3657 }
3658
3659 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3660 {
3661 /* If the reported event is an exit, fork, vfork or exec, let
3662 GDB know. */
3663 *ourstatus = event_child->waitstatus;
3664 /* Clear the event lwp's waitstatus since we handled it already. */
3665 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3666 }
3667 else
3668 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3669
3670 /* Now that we've selected our final event LWP, un-adjust its PC if
3671 it was a software breakpoint, and the client doesn't know we can
3672 adjust the breakpoint ourselves. */
3673 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3674 && !swbreak_feature)
3675 {
3676 int decr_pc = the_low_target.decr_pc_after_break;
3677
3678 if (decr_pc != 0)
3679 {
3680 struct regcache *regcache
3681 = get_thread_regcache (current_thread, 1);
3682 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3683 }
3684 }
3685
3686 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3687 {
3688 int sysret;
3689
3690 get_syscall_trapinfo (event_child,
3691 &ourstatus->value.syscall_number, &sysret);
3692 ourstatus->kind = event_child->syscall_state;
3693 }
3694 else if (current_thread->last_resume_kind == resume_stop
3695 && WSTOPSIG (w) == SIGSTOP)
3696 {
3697	      /* A thread that has been requested to stop by GDB with vCont;t
3698		 stopped cleanly, so report it as stopped with SIG0.  The use
3699		 of SIGSTOP is an implementation detail.  */
3700 ourstatus->value.sig = GDB_SIGNAL_0;
3701 }
3702 else if (current_thread->last_resume_kind == resume_stop
3703 && WSTOPSIG (w) != SIGSTOP)
3704 {
3705	      /* A thread that has been requested to stop by GDB with vCont;t,
3706		 but it stopped for other reasons.  */
3707 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3708 }
3709 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3710 {
3711 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3712 }
3713
3714 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3715
3716 if (debug_threads)
3717 {
3718 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3719 target_pid_to_str (ptid_of (current_thread)),
3720 ourstatus->kind, ourstatus->value.sig);
3721 debug_exit ();
3722 }
3723
3724 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3725 return filter_exit_event (event_child, ourstatus);
3726
3727 return ptid_of (current_thread);
3728}
3729
3730/* Get rid of any pending event in the pipe. */
3731static void
3732async_file_flush (void)
3733{
3734 int ret;
3735 char buf;
3736
3737 do
3738 ret = read (linux_event_pipe[0], &buf, 1);
3739 while (ret >= 0 || (ret == -1 && errno == EINTR));
3740}
3741
3742/* Put something in the pipe, so the event loop wakes up. */
3743static void
3744async_file_mark (void)
3745{
3746 int ret;
3747
3748 async_file_flush ();
3749
3750 do
3751 ret = write (linux_event_pipe[1], "+", 1);
3752 while (ret == 0 || (ret == -1 && errno == EINTR));
3753
3754 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3755 be awakened anyway. */
3756}
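/* The two routines above implement the classic self-pipe trick: the
   event loop selects on the read end, and anything that needs to wake
   it writes a byte to the write end.  A minimal standalone sketch is
   below; it assumes both pipe ends are O_NONBLOCK, as gdbserver
   arranges for linux_event_pipe, and uses hypothetical names.  */
#if 0
static int example_pipe[2];

static void
example_wake_event_loop (void)
{
  int ret;
  char buf;

  /* Drain any stale wake-up bytes first...  */
  do
    ret = read (example_pipe[0], &buf, 1);
  while (ret > 0 || (ret == -1 && errno == EINTR));

  /* ... then leave exactly one byte pending for the event loop.  */
  do
    ret = write (example_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));
}
#endif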
3757
3758static ptid_t
3759linux_wait (ptid_t ptid,
3760 struct target_waitstatus *ourstatus, int target_options)
3761{
3762 ptid_t event_ptid;
3763
3764 /* Flush the async file first. */
3765 if (target_is_async_p ())
3766 async_file_flush ();
3767
3768 do
3769 {
3770 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3771 }
3772 while ((target_options & TARGET_WNOHANG) == 0
3773 && ptid_equal (event_ptid, null_ptid)
3774 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3775
3776 /* If at least one stop was reported, there may be more. A single
3777 SIGCHLD can signal more than one child stop. */
3778 if (target_is_async_p ()
3779 && (target_options & TARGET_WNOHANG) != 0
3780 && !ptid_equal (event_ptid, null_ptid))
3781 async_file_mark ();
3782
3783 return event_ptid;
3784}
3785
3786/* Send a signal to an LWP. */
3787
3788static int
3789kill_lwp (unsigned long lwpid, int signo)
3790{
3791 int ret;
3792
3793 errno = 0;
3794 ret = syscall (__NR_tkill, lwpid, signo);
3795 if (errno == ENOSYS)
3796 {
3797 /* If tkill fails, then we are not using nptl threads, a
3798 configuration we no longer support. */
3799 perror_with_name (("tkill"));
3800 }
3801 return ret;
3802}
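/* For reference, kill (2) would post the signal to the whole thread
   group, not to one LWP.  A sketch using tgkill, which additionally
   checks the thread-group id and so cannot hit a recycled tid, is
   below (illustrative only; tgkill has been available since Linux
   2.5.75).  */
#if 0
static int
example_tgkill (int tgid, int tid, int signo)
{
  return syscall (__NR_tgkill, tgid, tid, signo);
}
#endif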
3803
3804void
3805linux_stop_lwp (struct lwp_info *lwp)
3806{
3807 send_sigstop (lwp);
3808}
3809
3810static void
3811send_sigstop (struct lwp_info *lwp)
3812{
3813 int pid;
3814
3815 pid = lwpid_of (get_lwp_thread (lwp));
3816
3817 /* If we already have a pending stop signal for this process, don't
3818 send another. */
3819 if (lwp->stop_expected)
3820 {
3821 if (debug_threads)
3822 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3823
3824 return;
3825 }
3826
3827 if (debug_threads)
3828 debug_printf ("Sending sigstop to lwp %d\n", pid);
3829
3830 lwp->stop_expected = 1;
3831 kill_lwp (pid, SIGSTOP);
3832}
3833
3834static int
3835send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3836{
3837 struct thread_info *thread = (struct thread_info *) entry;
3838 struct lwp_info *lwp = get_thread_lwp (thread);
3839
3840 /* Ignore EXCEPT. */
3841 if (lwp == except)
3842 return 0;
3843
3844 if (lwp->stopped)
3845 return 0;
3846
3847 send_sigstop (lwp);
3848 return 0;
3849}
3850
3851/* Increment the suspend count of an LWP, and stop it if it is not
3852   stopped yet.  */
3853static int
3854suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3855 void *except)
3856{
3857 struct thread_info *thread = (struct thread_info *) entry;
3858 struct lwp_info *lwp = get_thread_lwp (thread);
3859
3860 /* Ignore EXCEPT. */
3861 if (lwp == except)
3862 return 0;
3863
3864 lwp_suspended_inc (lwp);
3865
3866 return send_sigstop_callback (entry, except);
3867}
3868
3869static void
3870mark_lwp_dead (struct lwp_info *lwp, int wstat)
3871{
3872 /* Store the exit status for later. */
3873 lwp->status_pending_p = 1;
3874 lwp->status_pending = wstat;
3875
3876 /* Store in waitstatus as well, as there's nothing else to process
3877 for this event. */
3878 if (WIFEXITED (wstat))
3879 {
3880 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3881 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3882 }
3883 else if (WIFSIGNALED (wstat))
3884 {
3885 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3886 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3887 }
3888
3889 /* Prevent trying to stop it. */
3890 lwp->stopped = 1;
3891
3892 /* No further stops are expected from a dead lwp. */
3893 lwp->stop_expected = 0;
3894}
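/* The W* macros above decode the raw status word that waitpid
   returned.  An illustrative decoder covering the exit, signal-death
   and stop cases (sketch only, hypothetical name):  */
#if 0
static void
example_decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    fprintf (stderr, "exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    fprintf (stderr, "killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    fprintf (stderr, "stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif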
3895
3896/* Return true if LWP has exited already, and has a pending exit event
3897 to report to GDB. */
3898
3899static int
3900lwp_is_marked_dead (struct lwp_info *lwp)
3901{
3902 return (lwp->status_pending_p
3903 && (WIFEXITED (lwp->status_pending)
3904 || WIFSIGNALED (lwp->status_pending)));
3905}
3906
3907/* Wait for all children to stop for the SIGSTOPs we just queued. */
3908
3909static void
3910wait_for_sigstop (void)
3911{
3912 struct thread_info *saved_thread;
3913 ptid_t saved_tid;
3914 int wstat;
3915 int ret;
3916
3917 saved_thread = current_thread;
3918 if (saved_thread != NULL)
3919 saved_tid = saved_thread->entry.id;
3920 else
3921 saved_tid = null_ptid; /* avoid bogus unused warning */
3922
3923 if (debug_threads)
3924 debug_printf ("wait_for_sigstop: pulling events\n");
3925
3926 /* Passing NULL_PTID as filter indicates we want all events to be
3927 left pending. Eventually this returns when there are no
3928 unwaited-for children left. */
3929 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3930 &wstat, __WALL);
3931 gdb_assert (ret == -1);
3932
3933 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3934 current_thread = saved_thread;
3935 else
3936 {
3937 if (debug_threads)
3938 debug_printf ("Previously current thread died.\n");
3939
3940 /* We can't change the current inferior behind GDB's back,
3941 otherwise, a subsequent command may apply to the wrong
3942 process. */
3943 current_thread = NULL;
3944 }
3945}
3946
3947/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3948 move it out, because we need to report the stop event to GDB. For
3949 example, if the user puts a breakpoint in the jump pad, it's
3950 because she wants to debug it. */
3951
3952static int
3953stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3954{
3955 struct thread_info *thread = (struct thread_info *) entry;
3956 struct lwp_info *lwp = get_thread_lwp (thread);
3957
3958 if (lwp->suspended != 0)
3959 {
3960 internal_error (__FILE__, __LINE__,
3961 "LWP %ld is suspended, suspended=%d\n",
3962 lwpid_of (thread), lwp->suspended);
3963 }
3964 gdb_assert (lwp->stopped);
3965
3966 /* Allow debugging the jump pad, gdb_collect, etc.. */
3967 return (supports_fast_tracepoints ()
3968 && agent_loaded_p ()
3969 && (gdb_breakpoint_here (lwp->stop_pc)
3970 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3971 || thread->last_resume_kind == resume_step)
3972 && linux_fast_tracepoint_collecting (lwp, NULL));
3973}
3974
3975static void
3976move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3977{
3978 struct thread_info *thread = (struct thread_info *) entry;
3979 struct thread_info *saved_thread;
3980 struct lwp_info *lwp = get_thread_lwp (thread);
3981 int *wstat;
3982
3983 if (lwp->suspended != 0)
3984 {
3985 internal_error (__FILE__, __LINE__,
3986 "LWP %ld is suspended, suspended=%d\n",
3987 lwpid_of (thread), lwp->suspended);
3988 }
3989 gdb_assert (lwp->stopped);
3990
3991 /* For gdb_breakpoint_here. */
3992 saved_thread = current_thread;
3993 current_thread = thread;
3994
3995 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3996
3997 /* Allow debugging the jump pad, gdb_collect, etc. */
3998 if (!gdb_breakpoint_here (lwp->stop_pc)
3999 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4000 && thread->last_resume_kind != resume_step
4001 && maybe_move_out_of_jump_pad (lwp, wstat))
4002 {
4003 if (debug_threads)
4004 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4005 lwpid_of (thread));
4006
4007 if (wstat)
4008 {
4009 lwp->status_pending_p = 0;
4010 enqueue_one_deferred_signal (lwp, wstat);
4011
4012 if (debug_threads)
4013 debug_printf ("Signal %d for LWP %ld deferred "
4014 "(in jump pad)\n",
4015 WSTOPSIG (*wstat), lwpid_of (thread));
4016 }
4017
4018 linux_resume_one_lwp (lwp, 0, 0, NULL);
4019 }
4020 else
4021 lwp_suspended_inc (lwp);
4022
4023 current_thread = saved_thread;
4024}
4025
4026static int
4027lwp_running (struct inferior_list_entry *entry, void *data)
4028{
4029 struct thread_info *thread = (struct thread_info *) entry;
4030 struct lwp_info *lwp = get_thread_lwp (thread);
4031
4032 if (lwp_is_marked_dead (lwp))
4033 return 0;
4034 if (lwp->stopped)
4035 return 0;
4036 return 1;
4037}
4038
4039/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4040 If SUSPEND, then also increase the suspend count of every LWP,
4041 except EXCEPT. */
4042
4043static void
4044stop_all_lwps (int suspend, struct lwp_info *except)
4045{
4046 /* Should not be called recursively. */
4047 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4048
4049 if (debug_threads)
4050 {
4051 debug_enter ();
4052 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4053 suspend ? "stop-and-suspend" : "stop",
4054 except != NULL
4055 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4056 : "none");
4057 }
4058
4059 stopping_threads = (suspend
4060 ? STOPPING_AND_SUSPENDING_THREADS
4061 : STOPPING_THREADS);
4062
4063 if (suspend)
4064 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4065 else
4066 find_inferior (&all_threads, send_sigstop_callback, except);
4067 wait_for_sigstop ();
4068 stopping_threads = NOT_STOPPING_THREADS;
4069
4070 if (debug_threads)
4071 {
4072 debug_printf ("stop_all_lwps done, setting stopping_threads "
4073 "back to !stopping\n");
4074 debug_exit ();
4075 }
4076}
4077
4078/* Enqueue one signal in the chain of signals which need to be
4079 delivered to this process on next resume. */
4080
4081static void
4082enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4083{
4084 struct pending_signals *p_sig = XNEW (struct pending_signals);
4085
4086 p_sig->prev = lwp->pending_signals;
4087 p_sig->signal = signal;
4088 if (info == NULL)
4089 memset (&p_sig->info, 0, sizeof (siginfo_t));
4090 else
4091 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4092 lwp->pending_signals = p_sig;
4093}
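/* The pending_signals chain is pushed LIFO through the `prev' links,
   so delivering in arrival order means walking to the tail first, as
   linux_resume_one_lwp_throw does further down.  A sketch of that
   oldest-first pop (caller must ensure the list is non-empty):  */
#if 0
static int
example_dequeue_oldest (struct pending_signals **head)
{
  struct pending_signals **p = head;
  int signo;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  signo = (*p)->signal;
  free (*p);
  *p = NULL;
  return signo;
}
#endif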
4094
4095/* Install breakpoints for software single stepping. */
4096
4097static void
4098install_software_single_step_breakpoints (struct lwp_info *lwp)
4099{
4100 int i;
4101 CORE_ADDR pc;
4102 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4103 VEC (CORE_ADDR) *next_pcs = NULL;
4104 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4105
4106 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4107
4108 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4109 set_reinsert_breakpoint (pc);
4110
4111 do_cleanups (old_chain);
4112}
4113
4114/* Single step via hardware or software single step.
4115 Return 1 if hardware single stepping, 0 if software single stepping
4116 or can't single step. */
4117
4118static int
4119single_step (struct lwp_info* lwp)
4120{
4121 int step = 0;
4122
4123 if (can_hardware_single_step ())
4124 {
4125 step = 1;
4126 }
4127 else if (can_software_single_step ())
4128 {
4129 install_software_single_step_breakpoints (lwp);
4130 step = 0;
4131 }
4132 else
4133 {
4134 if (debug_threads)
4135	debug_printf ("stepping is not implemented on this target\n");
4136 }
4137
4138 return step;
4139}
4140
4141/* Resume execution of LWP. If STEP is nonzero, single-step it. If
4142 SIGNAL is nonzero, give it that signal. */
4143
4144static void
4145linux_resume_one_lwp_throw (struct lwp_info *lwp,
4146 int step, int signal, siginfo_t *info)
4147{
4148 struct thread_info *thread = get_lwp_thread (lwp);
4149 struct thread_info *saved_thread;
4150 int fast_tp_collecting;
4151 int ptrace_request;
4152 struct process_info *proc = get_thread_process (thread);
4153
4154  /* Note that the target description may not be initialised
4155     (proc->tdesc == NULL) at this point, because the program hasn't
4156     stopped at its first instruction yet.  It means GDBserver skips
4157     the extra traps from the wrapper program (see option --wrapper).
4158     Code in this function that requires register access should be
4159     guarded by a proc->tdesc != NULL check.  */
4160
4161 if (lwp->stopped == 0)
4162 return;
4163
4164 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4165
4166 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4167
4168 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4169
4170 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4171 user used the "jump" command, or "set $pc = foo"). */
4172 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4173 {
4174 /* Collecting 'while-stepping' actions doesn't make sense
4175 anymore. */
4176 release_while_stepping_state_list (thread);
4177 }
4178
4179 /* If we have pending signals or status, and a new signal, enqueue the
4180 signal. Also enqueue the signal if we are waiting to reinsert a
4181 breakpoint; it will be picked up again below. */
4182 if (signal != 0
4183 && (lwp->status_pending_p
4184 || lwp->pending_signals != NULL
4185 || lwp->bp_reinsert != 0
4186 || fast_tp_collecting))
4187 {
4188 struct pending_signals *p_sig = XNEW (struct pending_signals);
4189
4190 p_sig->prev = lwp->pending_signals;
4191 p_sig->signal = signal;
4192 if (info == NULL)
4193 memset (&p_sig->info, 0, sizeof (siginfo_t));
4194 else
4195 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4196 lwp->pending_signals = p_sig;
4197 }
4198
4199 if (lwp->status_pending_p)
4200 {
4201 if (debug_threads)
4202 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
4203 " has pending status\n",
4204 lwpid_of (thread), step ? "step" : "continue", signal,
4205 lwp->stop_expected ? "expected" : "not expected");
4206 return;
4207 }
4208
4209 saved_thread = current_thread;
4210 current_thread = thread;
4211
4212 if (debug_threads)
4213 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4214 lwpid_of (thread), step ? "step" : "continue", signal,
4215 lwp->stop_expected ? "expected" : "not expected");
4216
4217 /* This bit needs some thinking about. If we get a signal that
4218 we must report while a single-step reinsert is still pending,
4219 we often end up resuming the thread. It might be better to
4220 (ew) allow a stack of pending events; then we could be sure that
4221 the reinsert happened right away and not lose any signals.
4222
4223 Making this stack would also shrink the window in which breakpoints are
4224 uninserted (see comment in linux_wait_for_lwp) but not enough for
4225 complete correctness, so it won't solve that problem. It may be
4226 worthwhile just to solve this one, however. */
4227 if (lwp->bp_reinsert != 0)
4228 {
4229 if (debug_threads)
4230 debug_printf (" pending reinsert at 0x%s\n",
4231 paddress (lwp->bp_reinsert));
4232
4233 if (can_hardware_single_step ())
4234 {
4235 if (fast_tp_collecting == 0)
4236 {
4237 if (step == 0)
4238 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4239 if (lwp->suspended)
4240 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4241 lwp->suspended);
4242 }
4243
4244 step = 1;
4245 }
4246
4247 /* Postpone any pending signal. It was enqueued above. */
4248 signal = 0;
4249 }
4250
4251 if (fast_tp_collecting == 1)
4252 {
4253 if (debug_threads)
4254 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4255 " (exit-jump-pad-bkpt)\n",
4256 lwpid_of (thread));
4257
4258 /* Postpone any pending signal. It was enqueued above. */
4259 signal = 0;
4260 }
4261 else if (fast_tp_collecting == 2)
4262 {
4263 if (debug_threads)
4264 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4265 " single-stepping\n",
4266 lwpid_of (thread));
4267
4268 if (can_hardware_single_step ())
4269 step = 1;
4270 else
4271 {
4272 internal_error (__FILE__, __LINE__,
4273 "moving out of jump pad single-stepping"
4274 " not implemented on this target");
4275 }
4276
4277 /* Postpone any pending signal. It was enqueued above. */
4278 signal = 0;
4279 }
4280
4281  /* If we have while-stepping actions in this thread, set it stepping.
4282     If we have a signal to deliver, it may or may not be set to
4283     SIG_IGN; we don't know.  Assume so, and allow collecting
4284 while-stepping into a signal handler. A possible smart thing to
4285 do would be to set an internal breakpoint at the signal return
4286 address, continue, and carry on catching this while-stepping
4287 action only when that breakpoint is hit. A future
4288 enhancement. */
4289 if (thread->while_stepping != NULL)
4290 {
4291 if (debug_threads)
4292 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4293 lwpid_of (thread));
4294
4295 step = single_step (lwp);
4296 }
4297
4298 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4299 {
4300 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4301
4302 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4303
4304 if (debug_threads)
4305 {
4306 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4307 (long) lwp->stop_pc);
4308 }
4309 }
4310
4311 /* If we have pending signals, consume one unless we are trying to
4312 reinsert a breakpoint or we're trying to finish a fast tracepoint
4313 collect. */
4314 if (lwp->pending_signals != NULL
4315 && lwp->bp_reinsert == 0
4316 && fast_tp_collecting == 0)
4317 {
4318 struct pending_signals **p_sig;
4319
4320 p_sig = &lwp->pending_signals;
4321 while ((*p_sig)->prev != NULL)
4322 p_sig = &(*p_sig)->prev;
4323
4324 signal = (*p_sig)->signal;
4325 if ((*p_sig)->info.si_signo != 0)
4326 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4327 &(*p_sig)->info);
4328
4329 free (*p_sig);
4330 *p_sig = NULL;
4331 }
4332
4333 if (the_low_target.prepare_to_resume != NULL)
4334 the_low_target.prepare_to_resume (lwp);
4335
4336 regcache_invalidate_thread (thread);
4337 errno = 0;
4338 lwp->stepping = step;
4339 if (step)
4340 ptrace_request = PTRACE_SINGLESTEP;
4341 else if (gdb_catching_syscalls_p (lwp))
4342 ptrace_request = PTRACE_SYSCALL;
4343 else
4344 ptrace_request = PTRACE_CONT;
4345 ptrace (ptrace_request,
4346 lwpid_of (thread),
4347 (PTRACE_TYPE_ARG3) 0,
4348 /* Coerce to a uintptr_t first to avoid potential gcc warning
4349 of coercing an 8 byte integer to a 4 byte pointer. */
4350 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4351
4352 current_thread = saved_thread;
4353 if (errno)
4354 perror_with_name ("resuming thread");
4355
4356 /* Successfully resumed. Clear state that no longer makes sense,
4357 and mark the LWP as running. Must not do this before resuming
4358 otherwise if that fails other code will be confused. E.g., we'd
4359 later try to stop the LWP and hang forever waiting for a stop
4360 status. Note that we must not throw after this is cleared,
4361 otherwise handle_zombie_lwp_error would get confused. */
4362 lwp->stopped = 0;
4363 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4364}
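/* Stripped to its core, the resume request above is one ptrace call;
   the fourth argument is the signal to deliver to the tracee as it
   resumes (0 for none).  Illustrative sketch with hypothetical
   names:  */
#if 0
static long
example_resume_lwp (pid_t tid, int step, int signo)
{
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, tid,
		 (PTRACE_TYPE_ARG3) 0,
		 (PTRACE_TYPE_ARG4) (uintptr_t) signo);
}
#endif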
4365
4366/* Called when we try to resume a stopped LWP and that errors out. If
4367 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4368 or about to become), discard the error, clear any pending status
4369 the LWP may have, and return true (we'll collect the exit status
4370 soon enough). Otherwise, return false. */
4371
4372static int
4373check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4374{
4375 struct thread_info *thread = get_lwp_thread (lp);
4376
4377 /* If we get an error after resuming the LWP successfully, we'd
4378 confuse !T state for the LWP being gone. */
4379 gdb_assert (lp->stopped);
4380
4381 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4382 because even if ptrace failed with ESRCH, the tracee may be "not
4383 yet fully dead", but already refusing ptrace requests. In that
4384 case the tracee has 'R (Running)' state for a little bit
4385 (observed in Linux 3.18). See also the note on ESRCH in the
4386 ptrace(2) man page. Instead, check whether the LWP has any state
4387 other than ptrace-stopped. */
4388
4389 /* Don't assume anything if /proc/PID/status can't be read. */
4390 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4391 {
4392 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4393 lp->status_pending_p = 0;
4394 return 1;
4395 }
4396 return 0;
4397}
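/* A hand-rolled version of the /proc probe used above (the real
   helper is linux_proc_pid_is_trace_stopped_nowarn).  Sketch only: it
   keys on the "State:" line of /proc/PID/status, whose value starts
   with 't' for "tracing stop" on modern kernels (very old kernels
   reported 'T' for both job-control and tracing stops).  */
#if 0
static int
example_pid_is_trace_stopped (int pid)
{
  char path[64], line[128];
  FILE *f;
  int result = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	result = (*p == 't');
	break;
      }
  fclose (f);
  return result;
}
#endif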
4398
4399/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4400 disappears while we try to resume it. */
4401
4402static void
4403linux_resume_one_lwp (struct lwp_info *lwp,
4404 int step, int signal, siginfo_t *info)
4405{
4406 TRY
4407 {
4408 linux_resume_one_lwp_throw (lwp, step, signal, info);
4409 }
4410 CATCH (ex, RETURN_MASK_ERROR)
4411 {
4412 if (!check_ptrace_stopped_lwp_gone (lwp))
4413 throw_exception (ex);
4414 }
4415 END_CATCH
4416}
4417
4418struct thread_resume_array
4419{
4420 struct thread_resume *resume;
4421 size_t n;
4422};
4423
4424/* This function is called once per thread via find_inferior.
4425 ARG is a pointer to a thread_resume_array struct.
4426 We look up the thread specified by ENTRY in ARG, and mark the thread
4427 with a pointer to the appropriate resume request.
4428
4429   This algorithm is O(threads * resume elements), but the number of
4430   resume elements is small (and will remain small at least until GDB
4431   supports thread suspension).  */
4432
4433static int
4434linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4435{
4436 struct thread_info *thread = (struct thread_info *) entry;
4437 struct lwp_info *lwp = get_thread_lwp (thread);
4438 int ndx;
4439 struct thread_resume_array *r;
4440
4441 r = (struct thread_resume_array *) arg;
4442
4443 for (ndx = 0; ndx < r->n; ndx++)
4444 {
4445 ptid_t ptid = r->resume[ndx].thread;
4446 if (ptid_equal (ptid, minus_one_ptid)
4447 || ptid_equal (ptid, entry->id)
4448 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4449 of PID'. */
4450 || (ptid_get_pid (ptid) == pid_of (thread)
4451 && (ptid_is_pid (ptid)
4452 || ptid_get_lwp (ptid) == -1)))
4453 {
4454 if (r->resume[ndx].kind == resume_stop
4455 && thread->last_resume_kind == resume_stop)
4456 {
4457 if (debug_threads)
4458 debug_printf ("already %s LWP %ld at GDB's request\n",
4459 (thread->last_status.kind
4460 == TARGET_WAITKIND_STOPPED)
4461 ? "stopped"
4462 : "stopping",
4463 lwpid_of (thread));
4464
4465 continue;
4466 }
4467
4468 lwp->resume = &r->resume[ndx];
4469 thread->last_resume_kind = lwp->resume->kind;
4470
4471 lwp->step_range_start = lwp->resume->step_range_start;
4472 lwp->step_range_end = lwp->resume->step_range_end;
4473
4474 /* If we had a deferred signal to report, dequeue one now.
4475 This can happen if LWP gets more than one signal while
4476 trying to get out of a jump pad. */
4477 if (lwp->stopped
4478 && !lwp->status_pending_p
4479 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4480 {
4481 lwp->status_pending_p = 1;
4482
4483 if (debug_threads)
4484 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4485 "leaving status pending.\n",
4486 WSTOPSIG (lwp->status_pending),
4487 lwpid_of (thread));
4488 }
4489
4490 return 0;
4491 }
4492 }
4493
4494 /* No resume action for this thread. */
4495 lwp->resume = NULL;
4496
4497 return 0;
4498}
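/* The ptid matching above, isolated for reference.  A resume request
   can name this thread three ways: -1 (everything), an exact ptid, or
   a process-wide form ('pPID' or 'pPID.-1').  Sketch with a
   hypothetical name:  */
#if 0
static int
example_resume_request_matches (ptid_t request, ptid_t thread_ptid)
{
  return (ptid_equal (request, minus_one_ptid)	  /* all threads */
	  || ptid_equal (request, thread_ptid)	  /* exactly this one */
	  || (ptid_get_pid (request) == ptid_get_pid (thread_ptid)
	      && (ptid_is_pid (request)
		  || ptid_get_lwp (request) == -1))); /* whole process */
}
#endif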
4499
4500/* find_inferior callback for linux_resume.
4501 Set *FLAG_P if this lwp has an interesting status pending. */
4502
4503static int
4504resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4505{
4506 struct thread_info *thread = (struct thread_info *) entry;
4507 struct lwp_info *lwp = get_thread_lwp (thread);
4508
4509 /* LWPs which will not be resumed are not interesting, because
4510 we might not wait for them next time through linux_wait. */
4511 if (lwp->resume == NULL)
4512 return 0;
4513
4514 if (thread_still_has_status_pending_p (thread))
4515 * (int *) flag_p = 1;
4516
4517 return 0;
4518}
4519
4520/* Return 1 if this lwp that GDB wants running is stopped at an
4521 internal breakpoint that we need to step over. It assumes that any
4522 required STOP_PC adjustment has already been propagated to the
4523 inferior's regcache. */
4524
4525static int
4526need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4527{
4528 struct thread_info *thread = (struct thread_info *) entry;
4529 struct lwp_info *lwp = get_thread_lwp (thread);
4530 struct thread_info *saved_thread;
4531 CORE_ADDR pc;
4532 struct process_info *proc = get_thread_process (thread);
4533
4534 /* GDBserver is skipping the extra traps from the wrapper program,
4535 don't have to do step over. */
4536 if (proc->tdesc == NULL)
4537 return 0;
4538
4539 /* LWPs which will not be resumed are not interesting, because we
4540 might not wait for them next time through linux_wait. */
4541
4542 if (!lwp->stopped)
4543 {
4544 if (debug_threads)
4545 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4546 lwpid_of (thread));
4547 return 0;
4548 }
4549
4550 if (thread->last_resume_kind == resume_stop)
4551 {
4552 if (debug_threads)
4553 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4554 " stopped\n",
4555 lwpid_of (thread));
4556 return 0;
4557 }
4558
4559 gdb_assert (lwp->suspended >= 0);
4560
4561 if (lwp->suspended)
4562 {
4563 if (debug_threads)
4564 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4565 lwpid_of (thread));
4566 return 0;
4567 }
4568
4569 if (!lwp->need_step_over)
4570 {
4571 if (debug_threads)
4572 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4573 }
4574
4575 if (lwp->status_pending_p)
4576 {
4577 if (debug_threads)
4578 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4579 " status.\n",
4580 lwpid_of (thread));
4581 return 0;
4582 }
4583
4584 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4585 or we have. */
4586 pc = get_pc (lwp);
4587
4588 /* If the PC has changed since we stopped, then don't do anything,
4589 and let the breakpoint/tracepoint be hit. This happens if, for
4590 instance, GDB handled the decr_pc_after_break subtraction itself,
4591 GDB is OOL stepping this thread, or the user has issued a "jump"
4592 command, or poked thread's registers herself. */
4593 if (pc != lwp->stop_pc)
4594 {
4595 if (debug_threads)
4596 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4597 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4598 lwpid_of (thread),
4599 paddress (lwp->stop_pc), paddress (pc));
4600
4601 lwp->need_step_over = 0;
4602 return 0;
4603 }
4604
4605 saved_thread = current_thread;
4606 current_thread = thread;
4607
4608 /* We can only step over breakpoints we know about. */
4609 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4610 {
4611	  /* Don't step over a breakpoint that GDB expects to hit
4612	     though.  If the condition is being evaluated on the target's side
4613	     and it evaluates to false, step over this breakpoint as well.  */
4614 if (gdb_breakpoint_here (pc)
4615 && gdb_condition_true_at_breakpoint (pc)
4616 && gdb_no_commands_at_breakpoint (pc))
4617 {
4618 if (debug_threads)
4619 debug_printf ("Need step over [LWP %ld]? yes, but found"
4620 " GDB breakpoint at 0x%s; skipping step over\n",
4621 lwpid_of (thread), paddress (pc));
4622
4623 current_thread = saved_thread;
4624 return 0;
4625 }
4626 else
4627 {
4628 if (debug_threads)
4629 debug_printf ("Need step over [LWP %ld]? yes, "
4630 "found breakpoint at 0x%s\n",
4631 lwpid_of (thread), paddress (pc));
4632
4633 /* We've found an lwp that needs stepping over --- return 1 so
4634 that find_inferior stops looking. */
4635 current_thread = saved_thread;
4636
4637 /* If the step over is cancelled, this is set again. */
4638 lwp->need_step_over = 0;
4639 return 1;
4640 }
4641 }
4642
4643 current_thread = saved_thread;
4644
4645 if (debug_threads)
4646 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4647 " at 0x%s\n",
4648 lwpid_of (thread), paddress (pc));
4649
4650 return 0;
4651}
4652
4653/* Start a step-over operation on LWP. When LWP stopped at a
4654   breakpoint, to make progress, we need to move the breakpoint out
4655   of the way.  If we let other threads run while we do that, they may
4656   pass by the breakpoint location and miss hitting it.  To avoid
4657   that, a step-over momentarily stops all threads while LWP is
4658   single-stepped with the breakpoint temporarily uninserted from
4659 the inferior. When the single-step finishes, we reinsert the
4660 breakpoint, and let all threads that are supposed to be running,
4661 run again.
4662
4663 On targets that don't support hardware single-step, we don't
4664 currently support full software single-stepping. Instead, we only
4665 support stepping over the thread event breakpoint, by asking the
4666 low target where to place a reinsert breakpoint. Since this
4667 routine assumes the breakpoint being stepped over is a thread event
4668 breakpoint, it usually assumes the return address of the current
4669 function is a good enough place to set the reinsert breakpoint. */
4670
4671static int
4672start_step_over (struct lwp_info *lwp)
4673{
4674 struct thread_info *thread = get_lwp_thread (lwp);
4675 struct thread_info *saved_thread;
4676 CORE_ADDR pc;
4677 int step;
4678
4679 if (debug_threads)
4680 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4681 lwpid_of (thread));
4682
4683 stop_all_lwps (1, lwp);
4684
4685 if (lwp->suspended != 0)
4686 {
4687 internal_error (__FILE__, __LINE__,
4688 "LWP %ld suspended=%d\n", lwpid_of (thread),
4689 lwp->suspended);
4690 }
4691
4692 if (debug_threads)
4693 debug_printf ("Done stopping all threads for step-over.\n");
4694
4695 /* Note, we should always reach here with an already adjusted PC,
4696 either by GDB (if we're resuming due to GDB's request), or by our
4697 caller, if we just finished handling an internal breakpoint GDB
4698 shouldn't care about. */
4699 pc = get_pc (lwp);
4700
4701 saved_thread = current_thread;
4702 current_thread = thread;
4703
4704 lwp->bp_reinsert = pc;
4705 uninsert_breakpoints_at (pc);
4706 uninsert_fast_tracepoint_jumps_at (pc);
4707
4708 step = single_step (lwp);
4709
4710 current_thread = saved_thread;
4711
4712 linux_resume_one_lwp (lwp, step, 0, NULL);
4713
4714 /* Require next event from this LWP. */
4715 step_over_bkpt = thread->entry.id;
4716 return 1;
4717}
4718
4719/* Finish a step-over. Reinsert the breakpoint we had uninserted in
4720 start_step_over, if still there, and delete any reinsert
4721 breakpoints we've set, on non hardware single-step targets. */
4722
4723static int
4724finish_step_over (struct lwp_info *lwp)
4725{
4726 if (lwp->bp_reinsert != 0)
4727 {
4728 if (debug_threads)
4729 debug_printf ("Finished step over.\n");
4730
4731 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4732 may be no breakpoint to reinsert there by now. */
4733 reinsert_breakpoints_at (lwp->bp_reinsert);
4734 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4735
4736 lwp->bp_reinsert = 0;
4737
4738 /* Delete any software-single-step reinsert breakpoints. No
4739 longer needed. We don't have to worry about other threads
4740 hitting this trap, and later not being able to explain it,
4741 because we were stepping over a breakpoint, and we hold all
4742 threads but LWP stopped while doing that. */
4743 if (!can_hardware_single_step ())
4744 delete_reinsert_breakpoints ();
4745
4746 step_over_bkpt = null_ptid;
4747 return 1;
4748 }
4749 else
4750 return 0;
4751}
4752
4753/* If there's a step over in progress, wait until all threads stop
4754 (that is, until the stepping thread finishes its step), and
4755 unsuspend all lwps. The stepping thread ends with its status
4756 pending, which is processed later when we get back to processing
4757 events. */
4758
4759static void
4760complete_ongoing_step_over (void)
4761{
4762 if (!ptid_equal (step_over_bkpt, null_ptid))
4763 {
4764 struct lwp_info *lwp;
4765 int wstat;
4766 int ret;
4767
4768 if (debug_threads)
4769 debug_printf ("detach: step over in progress, finish it first\n");
4770
4771 /* Passing NULL_PTID as filter indicates we want all events to
4772 be left pending. Eventually this returns when there are no
4773 unwaited-for children left. */
4774 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4775 &wstat, __WALL);
4776 gdb_assert (ret == -1);
4777
4778 lwp = find_lwp_pid (step_over_bkpt);
4779 if (lwp != NULL)
4780 finish_step_over (lwp);
4781 step_over_bkpt = null_ptid;
4782 unsuspend_all_lwps (lwp);
4783 }
4784}
4785
4786/* This function is called once per thread. We check the thread's resume
4787 request, which will tell us whether to resume, step, or leave the thread
4788 stopped; and what signal, if any, it should be sent.
4789
4790 For threads which we aren't explicitly told otherwise, we preserve
4791 the stepping flag; this is used for stepping over gdbserver-placed
4792 breakpoints.
4793
4794 If pending_flags was set in any thread, we queue any needed
4795 signals, since we won't actually resume. We already have a pending
4796 event to report, so we don't need to preserve any step requests;
4797 they should be re-issued if necessary. */
4798
4799static int
4800linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4801{
4802 struct thread_info *thread = (struct thread_info *) entry;
4803 struct lwp_info *lwp = get_thread_lwp (thread);
4804 int step;
4805 int leave_all_stopped = * (int *) arg;
4806 int leave_pending;
4807
4808 if (lwp->resume == NULL)
4809 return 0;
4810
4811 if (lwp->resume->kind == resume_stop)
4812 {
4813 if (debug_threads)
4814 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4815
4816 if (!lwp->stopped)
4817 {
4818 if (debug_threads)
4819 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4820
4821 /* Stop the thread, and wait for the event asynchronously,
4822 through the event loop. */
4823 send_sigstop (lwp);
4824 }
4825 else
4826 {
4827 if (debug_threads)
4828 debug_printf ("already stopped LWP %ld\n",
4829 lwpid_of (thread));
4830
4831 /* The LWP may have been stopped in an internal event that
4832 was not meant to be notified back to GDB (e.g., gdbserver
4833 breakpoint), so we should be reporting a stop event in
4834 this case too. */
4835
4836 /* If the thread already has a pending SIGSTOP, this is a
4837 no-op. Otherwise, something later will presumably resume
4838 the thread and this will cause it to cancel any pending
4839 operation, due to last_resume_kind == resume_stop. If
4840 the thread already has a pending status to report, we
4841 will still report it the next time we wait - see
4842 status_pending_p_callback. */
4843
4844 /* If we already have a pending signal to report, then
4845 there's no need to queue a SIGSTOP, as this means we're
4846	 midway through moving the LWP out of the jump pad, and we
4847 will report the pending signal as soon as that is
4848 finished. */
4849 if (lwp->pending_signals_to_report == NULL)
4850 send_sigstop (lwp);
4851 }
4852
4853 /* For stop requests, we're done. */
4854 lwp->resume = NULL;
4855 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4856 return 0;
4857 }
4858
4859 /* If this thread which is about to be resumed has a pending status,
4860 then don't resume it - we can just report the pending status.
4861 Likewise if it is suspended, because e.g., another thread is
4862 stepping past a breakpoint. Make sure to queue any signals that
4863 would otherwise be sent. In all-stop mode, we do this decision
4864 based on if *any* thread has a pending status. If there's a
4865 thread that needs the step-over-breakpoint dance, then don't
4866 resume any other thread but that particular one. */
4867 leave_pending = (lwp->suspended
4868 || lwp->status_pending_p
4869 || leave_all_stopped);
4870
4871 if (!leave_pending)
4872 {
4873 if (debug_threads)
4874 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4875
4876 step = (lwp->resume->kind == resume_step);
4877 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4878 }
4879 else
4880 {
4881 if (debug_threads)
4882 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4883
4884 /* If we have a new signal, enqueue the signal. */
4885 if (lwp->resume->sig != 0)
4886 {
4887 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4888
4889 p_sig->prev = lwp->pending_signals;
4890 p_sig->signal = lwp->resume->sig;
4891
4892 /* If this is the same signal we were previously stopped by,
4893 make sure to queue its siginfo. We can ignore the return
4894 value of ptrace; if it fails, we'll skip
4895 PTRACE_SETSIGINFO. */
4896 if (WIFSTOPPED (lwp->last_status)
4897 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4898 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4899 &p_sig->info);
4900
4901 lwp->pending_signals = p_sig;
4902 }
4903 }
4904
4905 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4906 lwp->resume = NULL;
4907 return 0;
4908}
4909
4910static void
4911linux_resume (struct thread_resume *resume_info, size_t n)
4912{
4913 struct thread_resume_array array = { resume_info, n };
4914 struct thread_info *need_step_over = NULL;
4915 int any_pending;
4916 int leave_all_stopped;
4917
4918 if (debug_threads)
4919 {
4920 debug_enter ();
4921 debug_printf ("linux_resume:\n");
4922 }
4923
4924 find_inferior (&all_threads, linux_set_resume_request, &array);
4925
4926 /* If there is a thread which would otherwise be resumed, which has
4927 a pending status, then don't resume any threads - we can just
4928 report the pending status. Make sure to queue any signals that
4929 would otherwise be sent. In non-stop mode, we'll apply this
4930 logic to each thread individually. We consume all pending events
4931 before considering to start a step-over (in all-stop). */
4932 any_pending = 0;
4933 if (!non_stop)
4934 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4935
4936 /* If there is a thread which would otherwise be resumed, which is
4937 stopped at a breakpoint that needs stepping over, then don't
4938 resume any threads - have it step over the breakpoint with all
4939 other threads stopped, then resume all threads again. Make sure
4940 to queue any signals that would otherwise be delivered or
4941 queued. */
4942 if (!any_pending && supports_breakpoints ())
4943 need_step_over
4944 = (struct thread_info *) find_inferior (&all_threads,
4945 need_step_over_p, NULL);
4946
4947 leave_all_stopped = (need_step_over != NULL || any_pending);
4948
4949 if (debug_threads)
4950 {
4951 if (need_step_over != NULL)
4952 debug_printf ("Not resuming all, need step over\n");
4953 else if (any_pending)
4954 debug_printf ("Not resuming, all-stop and found "
4955 "an LWP with pending status\n");
4956 else
4957 debug_printf ("Resuming, no pending status or step over needed\n");
4958 }
4959
4960 /* Even if we're leaving threads stopped, queue all signals we'd
4961 otherwise deliver. */
4962 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4963
4964 if (need_step_over)
4965 start_step_over (get_thread_lwp (need_step_over));
4966
4967 if (debug_threads)
4968 {
4969 debug_printf ("linux_resume done\n");
4970 debug_exit ();
4971 }
4972
4973 /* We may have events that were pending that can/should be sent to
4974 the client now. Trigger a linux_wait call. */
4975 if (target_is_async_p ())
4976 async_file_mark ();
4977}
4978
4979/* This function is called once per thread. We check the thread's
4980 last resume request, which will tell us whether to resume, step, or
4981 leave the thread stopped. Any signal the client requested to be
4982 delivered has already been enqueued at this point.
4983
4984 If any thread that GDB wants running is stopped at an internal
4985 breakpoint that needs stepping over, we start a step-over operation
4986 on that particular thread, and leave all others stopped. */
4987
4988static int
4989proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4990{
4991 struct thread_info *thread = (struct thread_info *) entry;
4992 struct lwp_info *lwp = get_thread_lwp (thread);
4993 int step;
4994
4995 if (lwp == except)
4996 return 0;
4997
4998 if (debug_threads)
4999 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5000
5001 if (!lwp->stopped)
5002 {
5003 if (debug_threads)
5004 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5005 return 0;
5006 }
5007
5008 if (thread->last_resume_kind == resume_stop
5009 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5010 {
5011 if (debug_threads)
5012	debug_printf ("   client wants LWP %ld to remain stopped\n",
5013 lwpid_of (thread));
5014 return 0;
5015 }
5016
5017 if (lwp->status_pending_p)
5018 {
5019 if (debug_threads)
5020 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5021 lwpid_of (thread));
5022 return 0;
5023 }
5024
5025 gdb_assert (lwp->suspended >= 0);
5026
5027 if (lwp->suspended)
5028 {
5029 if (debug_threads)
5030 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5031 return 0;
5032 }
5033
5034 if (thread->last_resume_kind == resume_stop
5035 && lwp->pending_signals_to_report == NULL
5036 && lwp->collecting_fast_tracepoint == 0)
5037 {
5038 /* We haven't reported this LWP as stopped yet (otherwise, the
5039 last_status.kind check above would catch it, and we wouldn't
5040	 reach here).  This LWP may have been momentarily paused by a
5041	 stop_all_lwps call while handling, for example, another LWP's
5042 step-over. In that case, the pending expected SIGSTOP signal
5043 that was queued at vCont;t handling time will have already
5044 been consumed by wait_for_sigstop, and so we need to requeue
5045 another one here. Note that if the LWP already has a SIGSTOP
5046 pending, this is a no-op. */
5047
5048 if (debug_threads)
5049 debug_printf ("Client wants LWP %ld to stop. "
5050 "Making sure it has a SIGSTOP pending\n",
5051 lwpid_of (thread));
5052
5053 send_sigstop (lwp);
5054 }
5055
5056 if (thread->last_resume_kind == resume_step)
5057 {
5058 if (debug_threads)
5059 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5060 lwpid_of (thread));
5061 step = 1;
5062 }
5063 else if (lwp->bp_reinsert != 0)
5064 {
5065 if (debug_threads)
5066 debug_printf (" stepping LWP %ld, reinsert set\n",
5067 lwpid_of (thread));
5068 step = 1;
5069 }
5070 else
5071 step = 0;
5072
5073 linux_resume_one_lwp (lwp, step, 0, NULL);
5074 return 0;
5075}
5076
5077static int
5078unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5079{
5080 struct thread_info *thread = (struct thread_info *) entry;
5081 struct lwp_info *lwp = get_thread_lwp (thread);
5082
5083 if (lwp == except)
5084 return 0;
5085
5086 lwp_suspended_decr (lwp);
5087
5088 return proceed_one_lwp (entry, except);
5089}
5090
5091/* When we finish a step-over, set threads running again. If there's
5092 another thread that may need a step-over, now's the time to start
5093 it. Eventually, we'll move all threads past their breakpoints. */
5094
5095static void
5096proceed_all_lwps (void)
5097{
5098 struct thread_info *need_step_over;
5099
5100 /* If there is a thread which would otherwise be resumed, which is
5101 stopped at a breakpoint that needs stepping over, then don't
5102 resume any threads - have it step over the breakpoint with all
5103 other threads stopped, then resume all threads again. */
5104
5105 if (supports_breakpoints ())
5106 {
5107 need_step_over
5108 = (struct thread_info *) find_inferior (&all_threads,
5109 need_step_over_p, NULL);
5110
5111 if (need_step_over != NULL)
5112 {
5113 if (debug_threads)
5114 debug_printf ("proceed_all_lwps: found "
5115 "thread %ld needing a step-over\n",
5116 lwpid_of (need_step_over));
5117
5118 start_step_over (get_thread_lwp (need_step_over));
5119 return;
5120 }
5121 }
5122
5123 if (debug_threads)
5124 debug_printf ("Proceeding, no step-over needed\n");
5125
5126 find_inferior (&all_threads, proceed_one_lwp, NULL);
5127}
5128
5129/* Stopped LWPs that the client wanted to be running, that don't have
5130 pending statuses, are set to run again, except for EXCEPT, if not
5131 NULL. This undoes a stop_all_lwps call. */
5132
5133static void
5134unstop_all_lwps (int unsuspend, struct lwp_info *except)
5135{
5136 if (debug_threads)
5137 {
5138 debug_enter ();
5139 if (except)
5140 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5141 lwpid_of (get_lwp_thread (except)));
5142 else
5143 debug_printf ("unstopping all lwps\n");
5144 }
5145
5146 if (unsuspend)
5147 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5148 else
5149 find_inferior (&all_threads, proceed_one_lwp, except);
5150
5151 if (debug_threads)
5152 {
5153 debug_printf ("unstop_all_lwps done\n");
5154 debug_exit ();
5155 }
5156}
5157
5158
5159#ifdef HAVE_LINUX_REGSETS
5160
5161#define use_linux_regsets 1
5162
5163/* Returns true if REGSET has been disabled. */
5164
5165static int
5166regset_disabled (struct regsets_info *info, struct regset_info *regset)
5167{
5168 return (info->disabled_regsets != NULL
5169 && info->disabled_regsets[regset - info->regsets]);
5170}
5171
5172/* Disable REGSET. */
5173
5174static void
5175disable_regset (struct regsets_info *info, struct regset_info *regset)
5176{
5177 int dr_offset;
5178
5179 dr_offset = regset - info->regsets;
5180 if (info->disabled_regsets == NULL)
5181 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5182 info->disabled_regsets[dr_offset] = 1;
5183}
5184
5185static int
5186regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5187 struct regcache *regcache)
5188{
5189 struct regset_info *regset;
5190 int saw_general_regs = 0;
5191 int pid;
5192 struct iovec iov;
5193
5194 pid = lwpid_of (current_thread);
5195 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5196 {
5197 void *buf, *data;
5198 int nt_type, res;
5199
5200 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5201 continue;
5202
5203 buf = xmalloc (regset->size);
5204
5205 nt_type = regset->nt_type;
5206 if (nt_type)
5207 {
5208 iov.iov_base = buf;
5209 iov.iov_len = regset->size;
5210 data = (void *) &iov;
5211 }
5212 else
5213 data = buf;
5214
5215#ifndef __sparc__
5216 res = ptrace (regset->get_request, pid,
5217 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5218#else
5219 res = ptrace (regset->get_request, pid, data, nt_type);
5220#endif
5221 if (res < 0)
5222 {
5223 if (errno == EIO)
5224 {
5225 /* If we get EIO on a regset, do not try it again for
5226 this process mode. */
5227 disable_regset (regsets_info, regset);
5228 }
5229 else if (errno == ENODATA)
5230 {
5231 /* ENODATA may be returned if the regset is currently
5232 not "active". This can happen in normal operation,
5233 so suppress the warning in this case. */
5234 }
5235 else
5236 {
5237 char s[256];
5238 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5239 pid);
5240 perror (s);
5241 }
5242 }
5243 else
5244 {
5245 if (regset->type == GENERAL_REGS)
5246 saw_general_regs = 1;
5247 regset->store_function (regcache, buf);
5248 }
5249 free (buf);
5250 }
5251 if (saw_general_regs)
5252 return 0;
5253 else
5254 return 1;
5255}
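/* The iovec dance above, isolated.  With the PTRACE_GETREGSET family
   the third argument selects the note type and the fourth is a
   struct iovec; the kernel trims iov_len to the bytes actually
   transferred.  Sketch, assuming PTRACE_GETREGSET is available
   (Linux >= 2.6.34) and using NT_PRSTATUS as the example note:  */
#if 0
static int
example_getregset (pid_t tid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  return ptrace (PTRACE_GETREGSET, tid,
		 (PTRACE_TYPE_ARG3) NT_PRSTATUS, &iov);
}
#endif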
5256
5257static int
5258regsets_store_inferior_registers (struct regsets_info *regsets_info,
5259 struct regcache *regcache)
5260{
5261 struct regset_info *regset;
5262 int saw_general_regs = 0;
5263 int pid;
5264 struct iovec iov;
5265
5266 pid = lwpid_of (current_thread);
5267 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5268 {
5269 void *buf, *data;
5270 int nt_type, res;
5271
5272 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5273 || regset->fill_function == NULL)
5274 continue;
5275
5276 buf = xmalloc (regset->size);
5277
5278 /* First fill the buffer with the current register set contents,
5279 in case there are any items in the kernel's regset that are
5280 not in gdbserver's regcache. */
5281
5282 nt_type = regset->nt_type;
5283 if (nt_type)
5284 {
5285 iov.iov_base = buf;
5286 iov.iov_len = regset->size;
5287 data = (void *) &iov;
5288 }
5289 else
5290 data = buf;
5291
5292#ifndef __sparc__
5293 res = ptrace (regset->get_request, pid,
5294 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5295#else
5296 res = ptrace (regset->get_request, pid, data, nt_type);
5297#endif
5298
5299 if (res == 0)
5300 {
5301 /* Then overlay our cached registers on that. */
5302 regset->fill_function (regcache, buf);
5303
5304 /* Only now do we write the register set. */
5305#ifndef __sparc__
5306 res = ptrace (regset->set_request, pid,
5307 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5308#else
5309 res = ptrace (regset->set_request, pid, data, nt_type);
5310#endif
5311 }
5312
5313 if (res < 0)
5314 {
5315 if (errno == EIO)
5316 {
5317 /* If we get EIO on a regset, do not try it again for
5318 this process mode. */
5319 disable_regset (regsets_info, regset);
5320 }
5321 else if (errno == ESRCH)
5322 {
5323 /* At this point, ESRCH should mean the process is
5324 already gone, in which case we simply ignore attempts
5325 to change its registers. See also the related
5326 comment in linux_resume_one_lwp. */
5327 free (buf);
5328 return 0;
5329 }
5330 else
5331 {
5332 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5333 }
5334 }
5335 else if (regset->type == GENERAL_REGS)
5336 saw_general_regs = 1;
5337 free (buf);
5338 }
5339 if (saw_general_regs)
5340 return 0;
5341 else
5342 return 1;
5343}
5344
5345#else /* !HAVE_LINUX_REGSETS */
5346
5347#define use_linux_regsets 0
5348#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5349#define regsets_store_inferior_registers(regsets_info, regcache) 1
5350
5351#endif
5352
5353/* Return 1 if register REGNO is supported by one of the regset ptrace
5354 calls or 0 if it has to be transferred individually. */
5355
5356static int
5357linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5358{
5359 unsigned char mask = 1 << (regno % 8);
5360 size_t index = regno / 8;
5361
5362 return (use_linux_regsets
5363 && (regs_info->regset_bitmap == NULL
5364 || (regs_info->regset_bitmap[index] & mask) != 0));
5365}
5366
5367#ifdef HAVE_LINUX_USRREGS
5368
5369static int
5370register_addr (const struct usrregs_info *usrregs, int regnum)
5371{
5372 int addr;
5373
5374 if (regnum < 0 || regnum >= usrregs->num_regs)
5375 error ("Invalid register number %d.", regnum);
5376
5377 addr = usrregs->regmap[regnum];
5378
5379 return addr;
5380}
5381
5382/* Fetch one register. */
5383static void
5384fetch_register (const struct usrregs_info *usrregs,
5385 struct regcache *regcache, int regno)
5386{
5387 CORE_ADDR regaddr;
5388 int i, size;
5389 char *buf;
5390 int pid;
5391
5392 if (regno >= usrregs->num_regs)
5393 return;
5394 if ((*the_low_target.cannot_fetch_register) (regno))
5395 return;
5396
5397 regaddr = register_addr (usrregs, regno);
5398 if (regaddr == -1)
5399 return;
5400
5401 size = ((register_size (regcache->tdesc, regno)
5402 + sizeof (PTRACE_XFER_TYPE) - 1)
5403 & -sizeof (PTRACE_XFER_TYPE));
5404 buf = (char *) alloca (size);
5405
5406 pid = lwpid_of (current_thread);
5407 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5408 {
5409 errno = 0;
5410 *(PTRACE_XFER_TYPE *) (buf + i) =
5411 ptrace (PTRACE_PEEKUSER, pid,
5412 /* Coerce to a uintptr_t first to avoid potential gcc warning
5413 of coercing an 8 byte integer to a 4 byte pointer. */
5414 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5415 regaddr += sizeof (PTRACE_XFER_TYPE);
5416 if (errno != 0)
5417 error ("reading register %d: %s", regno, strerror (errno));
5418 }
5419
5420 if (the_low_target.supply_ptrace_register)
5421 the_low_target.supply_ptrace_register (regcache, regno, buf);
5422 else
5423 supply_register (regcache, regno, buf);
5424}
5425
5426/* Store one register. */
5427static void
5428store_register (const struct usrregs_info *usrregs,
5429 struct regcache *regcache, int regno)
5430{
5431 CORE_ADDR regaddr;
5432 int i, size;
5433 char *buf;
5434 int pid;
5435
5436 if (regno >= usrregs->num_regs)
5437 return;
5438 if ((*the_low_target.cannot_store_register) (regno))
5439 return;
5440
5441 regaddr = register_addr (usrregs, regno);
5442 if (regaddr == -1)
5443 return;
5444
5445 size = ((register_size (regcache->tdesc, regno)
5446 + sizeof (PTRACE_XFER_TYPE) - 1)
5447 & -sizeof (PTRACE_XFER_TYPE));
5448 buf = (char *) alloca (size);
5449 memset (buf, 0, size);
5450
5451 if (the_low_target.collect_ptrace_register)
5452 the_low_target.collect_ptrace_register (regcache, regno, buf);
5453 else
5454 collect_register (regcache, regno, buf);
5455
5456 pid = lwpid_of (current_thread);
5457 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5458 {
5459 errno = 0;
5460 ptrace (PTRACE_POKEUSER, pid,
5461 /* Coerce to a uintptr_t first to avoid potential gcc warning
5462 about coercing an 8 byte integer to a 4 byte pointer. */
5463 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5464 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5465 if (errno != 0)
5466 {
5467 /* At this point, ESRCH should mean the process is
5468 already gone, in which case we simply ignore attempts
5469 to change its registers. See also the related
5470 comment in linux_resume_one_lwp. */
5471 if (errno == ESRCH)
5472 return;
5473
5474 if ((*the_low_target.cannot_store_register) (regno) == 0)
5475 error ("writing register %d: %s", regno, strerror (errno));
5476 }
5477 regaddr += sizeof (PTRACE_XFER_TYPE);
5478 }
5479}
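/* USER-area access is word-at-a-time, and PTRACE_PEEKUSER returns the
   data in the syscall return value, so errno must be cleared first to
   tell a legitimate -1 word from an error -- the same idiom the two
   functions above use.  Minimal sketch, hypothetical name:  */
#if 0
static long
example_peekuser (pid_t tid, long offset)
{
  long val;

  errno = 0;
  val = ptrace (PTRACE_PEEKUSER, tid,
		(PTRACE_TYPE_ARG3) (uintptr_t) offset,
		(PTRACE_TYPE_ARG4) 0);
  if (errno != 0)
    perror ("PTRACE_PEEKUSER");
  return val;
}
#endif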
5480
5481/* Fetch all registers, or just one, from the child process.
5482 If REGNO is -1, do this for all registers, skipping any that are
5483 assumed to have been retrieved by regsets_fetch_inferior_registers,
5484 unless ALL is non-zero.
5485 Otherwise, REGNO specifies which register (so we can save time). */
5486static void
5487usr_fetch_inferior_registers (const struct regs_info *regs_info,
5488 struct regcache *regcache, int regno, int all)
5489{
5490 struct usrregs_info *usr = regs_info->usrregs;
5491
5492 if (regno == -1)
5493 {
5494 for (regno = 0; regno < usr->num_regs; regno++)
5495 if (all || !linux_register_in_regsets (regs_info, regno))
5496 fetch_register (usr, regcache, regno);
5497 }
5498 else
5499 fetch_register (usr, regcache, regno);
5500}
5501
5502/* Store our register values back into the inferior.
5503 If REGNO is -1, do this for all registers, skipping any that are
5504 assumed to have been saved by regsets_store_inferior_registers,
5505 unless ALL is non-zero.
5506 Otherwise, REGNO specifies which register (so we can save time). */
5507static void
5508usr_store_inferior_registers (const struct regs_info *regs_info,
5509 struct regcache *regcache, int regno, int all)
5510{
5511 struct usrregs_info *usr = regs_info->usrregs;
5512
5513 if (regno == -1)
5514 {
5515 for (regno = 0; regno < usr->num_regs; regno++)
5516 if (all || !linux_register_in_regsets (regs_info, regno))
5517 store_register (usr, regcache, regno);
5518 }
5519 else
5520 store_register (usr, regcache, regno);
5521}
5522
5523#else /* !HAVE_LINUX_USRREGS */
5524
5525#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5526#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5527
5528#endif
5529
5530
5531static void
5532linux_fetch_registers (struct regcache *regcache, int regno)
5533{
5534 int use_regsets;
5535 int all = 0;
5536 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5537
5538 if (regno == -1)
5539 {
5540 if (the_low_target.fetch_register != NULL
5541 && regs_info->usrregs != NULL)
5542 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5543 (*the_low_target.fetch_register) (regcache, regno);
5544
5545 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5546 if (regs_info->usrregs != NULL)
5547 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5548 }
5549 else
5550 {
5551 if (the_low_target.fetch_register != NULL
5552 && (*the_low_target.fetch_register) (regcache, regno))
5553 return;
5554
5555 use_regsets = linux_register_in_regsets (regs_info, regno);
5556 if (use_regsets)
5557 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5558 regcache);
5559 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5560 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5561 }
5562}
5563
5564static void
5565linux_store_registers (struct regcache *regcache, int regno)
5566{
5567 int use_regsets;
5568 int all = 0;
5569 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5570
5571 if (regno == -1)
5572 {
5573 all = regsets_store_inferior_registers (regs_info->regsets_info,
5574 regcache);
5575 if (regs_info->usrregs != NULL)
5576 usr_store_inferior_registers (regs_info, regcache, regno, all);
5577 }
5578 else
5579 {
5580 use_regsets = linux_register_in_regsets (regs_info, regno);
5581 if (use_regsets)
5582 all = regsets_store_inferior_registers (regs_info->regsets_info,
5583 regcache);
5584 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5585 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5586 }
5587}
5588
5589
5590/* Copy LEN bytes from inferior's memory starting at MEMADDR
5591 to debugger memory starting at MYADDR. */
5592
5593static int
5594linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5595{
5596 int pid = lwpid_of (current_thread);
5597 register PTRACE_XFER_TYPE *buffer;
5598 register CORE_ADDR addr;
5599 register int count;
5600 char filename[64];
5601 register int i;
5602 int ret;
5603 int fd;
5604
5605 /* Try using /proc. Don't bother for one word. */
5606 if (len >= 3 * sizeof (long))
5607 {
5608 int bytes;
5609
5610 /* We could keep this file open and cache it - possibly one per
5611 thread. That requires some juggling, but is even faster. */
5612 sprintf (filename, "/proc/%d/mem", pid);
5613 fd = open (filename, O_RDONLY | O_LARGEFILE);
5614 if (fd == -1)
5615 goto no_proc;
5616
5617 /* If pread64 is available, use it. It's faster if the kernel
5618 supports it (only one syscall), and it's 64-bit safe even on
5619 32-bit platforms (for instance, SPARC debugging a SPARC64
5620 application). */
5621#ifdef HAVE_PREAD64
5622 bytes = pread64 (fd, myaddr, len, memaddr);
5623#else
5624 bytes = -1;
5625 if (lseek (fd, memaddr, SEEK_SET) != -1)
5626 bytes = read (fd, myaddr, len);
5627#endif
5628
5629 close (fd);
5630 if (bytes == len)
5631 return 0;
5632
5633 /* Some data was read, we'll try to get the rest with ptrace. */
5634 if (bytes > 0)
5635 {
5636 memaddr += bytes;
5637 myaddr += bytes;
5638 len -= bytes;
5639 }
5640 }
5641
5642 no_proc:
5643 /* Round starting address down to longword boundary. */
5644 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5645 /* Round ending address up; get number of longwords that makes. */
5646 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5647 / sizeof (PTRACE_XFER_TYPE));
5648 /* Allocate buffer of that many longwords. */
5649 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5650
5651 /* Read all the longwords.  */
5652 errno = 0;
5653 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5654 {
5655 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5656 about coercing an 8 byte integer to a 4 byte pointer. */
5657 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5658 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5659 (PTRACE_TYPE_ARG4) 0);
5660 if (errno)
5661 break;
5662 }
5663 ret = errno;
5664
5665 /* Copy appropriate bytes out of the buffer. */
5666 if (i > 0)
5667 {
5668 i *= sizeof (PTRACE_XFER_TYPE);
5669 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5670 memcpy (myaddr,
5671 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5672 i < len ? i : len);
5673 }
5674
5675 return ret;
5676}
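/* The /proc fast path above, reduced to a standalone sketch.  The
   helper name proc_mem_read is hypothetical, and plain pread(2) stands
   in for the HAVE_PREAD64 configure check (not from this file; disabled
   from the build):  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Read LEN bytes at ADDR from a ptrace-stopped process PID via
   /proc/PID/mem.  Return the number of bytes read, or -1 on error.  */
static ssize_t
proc_mem_read (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", (int) pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif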
5677
5678/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5679 memory at MEMADDR. On failure (cannot write to the inferior)
5680 returns the value of errno. Always succeeds if LEN is zero. */
5681
5682static int
5683linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5684{
5685 register int i;
5686 /* Round starting address down to longword boundary. */
5687 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5688 /* Round ending address up; get number of longwords that makes. */
5689 register int count
5690 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5691 / sizeof (PTRACE_XFER_TYPE);
5692
5693 /* Allocate buffer of that many longwords. */
5694 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5695
5696 int pid = lwpid_of (current_thread);
5697
5698 if (len == 0)
5699 {
5700 /* Zero length write always succeeds. */
5701 return 0;
5702 }
5703
5704 if (debug_threads)
5705 {
5706 /* Dump up to four bytes. */
5707 char str[4 * 2 + 1];
5708 char *p = str;
5709 int dump = len < 4 ? len : 4;
5710
5711 for (i = 0; i < dump; i++)
5712 {
5713 sprintf (p, "%02x", myaddr[i]);
5714 p += 2;
5715 }
5716 *p = '\0';
5717
5718 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5719 str, (long) memaddr, pid);
5720 }
5721
5722 /* Fill start and end extra bytes of buffer with existing memory data. */
5723
5724 errno = 0;
5725 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5726 about coercing an 8 byte integer to a 4 byte pointer. */
5727 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5728 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5729 (PTRACE_TYPE_ARG4) 0);
5730 if (errno)
5731 return errno;
5732
5733 if (count > 1)
5734 {
5735 errno = 0;
5736 buffer[count - 1]
5737 = ptrace (PTRACE_PEEKTEXT, pid,
5738 /* Coerce to a uintptr_t first to avoid potential gcc warning
5739 about coercing an 8 byte integer to a 4 byte pointer. */
5740 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5741 * sizeof (PTRACE_XFER_TYPE)),
5742 (PTRACE_TYPE_ARG4) 0);
5743 if (errno)
5744 return errno;
5745 }
5746
5747 /* Copy data to be written over corresponding part of buffer. */
5748
5749 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5750 myaddr, len);
5751
5752 /* Write the entire buffer. */
5753
5754 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5755 {
5756 errno = 0;
5757 ptrace (PTRACE_POKETEXT, pid,
5758 /* Coerce to a uintptr_t first to avoid potential gcc warning
5759 about coercing an 8 byte integer to a 4 byte pointer. */
5760 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5761 (PTRACE_TYPE_ARG4) buffer[i]);
5762 if (errno)
5763 return errno;
5764 }
5765
5766 return 0;
5767}
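/* The boundary arithmetic above, worked through on concrete numbers: a
   4-byte write at unaligned address 0x1006 with an 8-byte transfer word
   spans two words, which is why the first and last words are peeked and
   patched before being poked back.  A minimal sketch with hypothetical
   values (not from this file; disabled from the build):  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long memaddr = 0x1006, len = 4, xfer = 8;
  unsigned long addr = memaddr & -xfer;	/* rounded down: 0x1000 */
  unsigned long count
    = ((memaddr + len) - addr + xfer - 1) / xfer;	/* 2 words */

  printf ("addr=%#lx count=%lu\n", addr, count);
  return 0;
}
#endif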
5768
5769static void
5770linux_look_up_symbols (void)
5771{
5772#ifdef USE_THREAD_DB
5773 struct process_info *proc = current_process ();
5774
5775 if (proc->priv->thread_db != NULL)
5776 return;
5777
5778 thread_db_init ();
5779#endif
5780}
5781
5782static void
5783linux_request_interrupt (void)
5784{
5785 extern unsigned long signal_pid;
5786
5787 /* Send a SIGINT to the process group. This acts just like the user
5788 typed a ^C on the controlling terminal. */
5789 kill (-signal_pid, SIGINT);
5790}
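/* kill(2) with a negative PID signals the entire process group, which
   is what gives the interrupt above its ^C-like behaviour.  A
   standalone sketch with a hypothetical helper name (not from this
   file; disabled from the build):  */
#if 0
#include <signal.h>
#include <sys/types.h>

/* Deliver SIGINT to every process in PID's process group.  */
static int
interrupt_group (pid_t pid)
{
  return kill (-pid, SIGINT);
}
#endif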
5791
5792/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5793 to debugger memory starting at MYADDR. */
5794
5795static int
5796linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5797{
5798 char filename[PATH_MAX];
5799 int fd, n;
5800 int pid = lwpid_of (current_thread);
5801
5802 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5803
5804 fd = open (filename, O_RDONLY);
5805 if (fd < 0)
5806 return -1;
5807
5808 if (offset != (CORE_ADDR) 0
5809 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5810 n = -1;
5811 else
5812 n = read (fd, myaddr, len);
5813
5814 close (fd);
5815
5816 return n;
5817}
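/* The same auxiliary vector can be inspected for the current process
   with getauxval(3) (glibc 2.16 and later), which is convenient when
   experimenting with the format read above from /proc/PID/auxv.  A
   minimal sketch (not from this file; disabled from the build):  */
#if 0
#include <stdio.h>
#include <sys/auxv.h>

int
main (void)
{
  printf ("AT_PHDR   = %#lx\n", getauxval (AT_PHDR));
  printf ("AT_PHNUM  = %lu\n", getauxval (AT_PHNUM));
  printf ("AT_PAGESZ = %lu\n", getauxval (AT_PAGESZ));
  return 0;
}
#endif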
5818
5819/* These breakpoint- and watchpoint-related wrapper functions simply
5820 pass on the function call if the target has registered a
5821 corresponding function. */
5822
5823static int
5824linux_supports_z_point_type (char z_type)
5825{
5826 return (the_low_target.supports_z_point_type != NULL
5827 && the_low_target.supports_z_point_type (z_type));
5828}
5829
5830static int
5831linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5832 int size, struct raw_breakpoint *bp)
5833{
5834 if (type == raw_bkpt_type_sw)
5835 return insert_memory_breakpoint (bp);
5836 else if (the_low_target.insert_point != NULL)
5837 return the_low_target.insert_point (type, addr, size, bp);
5838 else
5839 /* Unsupported (see target.h). */
5840 return 1;
5841}
5842
5843static int
5844linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5845 int size, struct raw_breakpoint *bp)
5846{
5847 if (type == raw_bkpt_type_sw)
5848 return remove_memory_breakpoint (bp);
5849 else if (the_low_target.remove_point != NULL)
5850 return the_low_target.remove_point (type, addr, size, bp);
5851 else
5852 /* Unsupported (see target.h). */
5853 return 1;
5854}
5855
5856/* Implement the to_stopped_by_sw_breakpoint target_ops
5857 method. */
5858
5859static int
5860linux_stopped_by_sw_breakpoint (void)
5861{
5862 struct lwp_info *lwp = get_thread_lwp (current_thread);
5863
5864 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5865}
5866
5867/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5868 method. */
5869
5870static int
5871linux_supports_stopped_by_sw_breakpoint (void)
5872{
5873 return USE_SIGTRAP_SIGINFO;
5874}
5875
5876/* Implement the to_stopped_by_hw_breakpoint target_ops
5877 method. */
5878
5879static int
5880linux_stopped_by_hw_breakpoint (void)
5881{
5882 struct lwp_info *lwp = get_thread_lwp (current_thread);
5883
5884 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5885}
5886
5887/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5888 method. */
5889
5890static int
5891linux_supports_stopped_by_hw_breakpoint (void)
5892{
5893 return USE_SIGTRAP_SIGINFO;
5894}
5895
5896/* Implement the supports_hardware_single_step target_ops method. */
5897
5898static int
5899linux_supports_hardware_single_step (void)
5900{
5901 return can_hardware_single_step ();
5902}
5903
5904static int
5905linux_supports_software_single_step (void)
5906{
5907 return can_software_single_step ();
5908}
5909
5910static int
5911linux_stopped_by_watchpoint (void)
5912{
5913 struct lwp_info *lwp = get_thread_lwp (current_thread);
5914
5915 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5916}
5917
5918static CORE_ADDR
5919linux_stopped_data_address (void)
5920{
5921 struct lwp_info *lwp = get_thread_lwp (current_thread);
5922
5923 return lwp->stopped_data_address;
5924}
5925
5926#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5927 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5928 && defined(PT_TEXT_END_ADDR)
5929
5930/* This is only used for targets that define PT_TEXT_ADDR,
5931 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5932 the target has different ways of acquiring this information, like
5933 loadmaps. */
5934
5935/* Under uClinux, programs are loaded at non-zero offsets, which we need
5936 to tell gdb about. */
5937
5938static int
5939linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5940{
5941 unsigned long text, text_end, data;
5942 int pid = lwpid_of (current_thread);
5943
5944 errno = 0;
5945
5946 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5947 (PTRACE_TYPE_ARG4) 0);
5948 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5949 (PTRACE_TYPE_ARG4) 0);
5950 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5951 (PTRACE_TYPE_ARG4) 0);
5952
5953 if (errno == 0)
5954 {
5955 /* Both text and data offsets produced at compile-time (and so
5956 used by gdb) are relative to the beginning of the program,
5957 with the data segment immediately following the text segment.
5958 However, the actual runtime layout in memory may put the data
5959 somewhere else, so when we send gdb a data base-address, we
5960 use the real data base address and subtract the compile-time
5961 data base-address from it (which is just the length of the
5962 text segment). BSS immediately follows data in both
5963 cases. */
5964 *text_p = text;
5965 *data_p = data - (text_end - text);
5966
5967 return 1;
5968 }
5969 return 0;
5970}
5971#endif
5972
5973static int
5974linux_qxfer_osdata (const char *annex,
5975 unsigned char *readbuf, unsigned const char *writebuf,
5976 CORE_ADDR offset, int len)
5977{
5978 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5979}
5980
5981/* Convert a native/host siginfo object, into/from the siginfo in the
5982 layout of the inferiors' architecture. */
5983
5984static void
5985siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5986{
5987 int done = 0;
5988
5989 if (the_low_target.siginfo_fixup != NULL)
5990 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5991
5992 /* If there was no callback, or the callback didn't do anything,
5993 then just do a straight memcpy. */
5994 if (!done)
5995 {
5996 if (direction == 1)
5997 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5998 else
5999 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6000 }
6001}
6002
6003static int
6004linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6005 unsigned const char *writebuf, CORE_ADDR offset, int len)
6006{
6007 int pid;
6008 siginfo_t siginfo;
6009 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6010
6011 if (current_thread == NULL)
6012 return -1;
6013
6014 pid = lwpid_of (current_thread);
6015
6016 if (debug_threads)
6017 debug_printf ("%s siginfo for lwp %d.\n",
6018 readbuf != NULL ? "Reading" : "Writing",
6019 pid);
6020
6021 if (offset >= sizeof (siginfo))
6022 return -1;
6023
6024 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6025 return -1;
6026
6027 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6028 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6029 inferior with a 64-bit GDBSERVER should look the same as debugging it
6030 with a 32-bit GDBSERVER, we need to convert it. */
6031 siginfo_fixup (&siginfo, inf_siginfo, 0);
6032
6033 if (offset + len > sizeof (siginfo))
6034 len = sizeof (siginfo) - offset;
6035
6036 if (readbuf != NULL)
6037 memcpy (readbuf, inf_siginfo + offset, len);
6038 else
6039 {
6040 memcpy (inf_siginfo + offset, writebuf, len);
6041
6042 /* Convert back to ptrace layout before flushing it out. */
6043 siginfo_fixup (&siginfo, inf_siginfo, 1);
6044
6045 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6046 return -1;
6047 }
6048
6049 return len;
6050}
6051
6052/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6053 it lets us notice when children change state; and it acts as the
6054 handler for the sigsuspend in my_waitpid. */
6055
6056static void
6057sigchld_handler (int signo)
6058{
6059 int old_errno = errno;
6060
6061 if (debug_threads)
6062 {
6063 do
6064 {
6065 /* fprintf is not async-signal-safe, so call write
6066 directly. */
6067 if (write (2, "sigchld_handler\n",
6068 sizeof ("sigchld_handler\n") - 1) < 0)
6069 break; /* just ignore */
6070 } while (0);
6071 }
6072
6073 if (target_is_async_p ())
6074 async_file_mark (); /* trigger a linux_wait */
6075
6076 errno = old_errno;
6077}
6078
6079static int
6080linux_supports_non_stop (void)
6081{
6082 return 1;
6083}
6084
6085static int
6086linux_async (int enable)
6087{
6088 int previous = target_is_async_p ();
6089
6090 if (debug_threads)
6091 debug_printf ("linux_async (%d), previous=%d\n",
6092 enable, previous);
6093
6094 if (previous != enable)
6095 {
6096 sigset_t mask;
6097 sigemptyset (&mask);
6098 sigaddset (&mask, SIGCHLD);
6099
6100 sigprocmask (SIG_BLOCK, &mask, NULL);
6101
6102 if (enable)
6103 {
6104 if (pipe (linux_event_pipe) == -1)
6105 {
6106 linux_event_pipe[0] = -1;
6107 linux_event_pipe[1] = -1;
6108 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6109
6110 warning ("creating event pipe failed.");
6111 return previous;
6112 }
6113
6114 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6115 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6116
6117 /* Register the event loop handler. */
6118 add_file_handler (linux_event_pipe[0],
6119 handle_target_event, NULL);
6120
6121 /* Always trigger a linux_wait. */
6122 async_file_mark ();
6123 }
6124 else
6125 {
6126 delete_file_handler (linux_event_pipe[0]);
6127
6128 close (linux_event_pipe[0]);
6129 close (linux_event_pipe[1]);
6130 linux_event_pipe[0] = -1;
6131 linux_event_pipe[1] = -1;
6132 }
6133
6134 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6135 }
6136
6137 return previous;
6138}
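/* The event pipe above is the classic self-pipe trick: the signal
   handler only writes a byte to a non-blocking pipe, and the event loop
   polls the read end.  A minimal standalone sketch with hypothetical
   names (not from this file; disabled from the build):  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

static void
wake_handler (int sig)
{
  int saved = errno;

  /* write(2) is async-signal-safe; a full pipe is fine to ignore.  */
  (void) write (event_pipe[1], "+", 1);
  errno = saved;
}

static int
setup_event_pipe (void)
{
  if (pipe (event_pipe) != 0)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, wake_handler);
  return event_pipe[0];	/* poll this fd in the event loop */
}
#endif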
6139
6140static int
6141linux_start_non_stop (int nonstop)
6142{
6143 /* Register or unregister from event-loop accordingly. */
6144 linux_async (nonstop);
6145
6146 if (target_is_async_p () != (nonstop != 0))
6147 return -1;
6148
6149 return 0;
6150}
6151
6152static int
6153linux_supports_multi_process (void)
6154{
6155 return 1;
6156}
6157
6158/* Check if fork events are supported. */
6159
6160static int
6161linux_supports_fork_events (void)
6162{
6163 return linux_supports_tracefork ();
6164}
6165
6166/* Check if vfork events are supported. */
6167
6168static int
6169linux_supports_vfork_events (void)
6170{
6171 return linux_supports_tracefork ();
6172}
6173
6174/* Check if exec events are supported. */
6175
6176static int
6177linux_supports_exec_events (void)
6178{
6179 return linux_supports_traceexec ();
6180}
6181
6182/* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6183 options for the specified lwp. */
6184
6185static int
6186reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6187 void *args)
6188{
6189 struct thread_info *thread = (struct thread_info *) entry;
6190 struct lwp_info *lwp = get_thread_lwp (thread);
6191
6192 if (!lwp->stopped)
6193 {
6194 /* Stop the lwp so we can modify its ptrace options. */
6195 lwp->must_set_ptrace_flags = 1;
6196 linux_stop_lwp (lwp);
6197 }
6198 else
6199 {
6200 /* Already stopped; go ahead and set the ptrace options. */
6201 struct process_info *proc = find_process_pid (pid_of (thread));
6202 int options = linux_low_ptrace_options (proc->attached);
6203
6204 linux_enable_event_reporting (lwpid_of (thread), options);
6205 lwp->must_set_ptrace_flags = 0;
6206 }
6207
6208 return 0;
6209}
6210
6211/* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6212 ptrace flags for all inferiors. This is in case the new GDB connection
6213 doesn't support the same set of events that the previous one did. */
6214
6215static void
6216linux_handle_new_gdb_connection (void)
6217{
6218 pid_t pid;
6219
6220 /* Request that all the lwps reset their ptrace options. */
6221 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6222}
6223
6224static int
6225linux_supports_disable_randomization (void)
6226{
6227#ifdef HAVE_PERSONALITY
6228 return 1;
6229#else
6230 return 0;
6231#endif
6232}
6233
6234static int
6235linux_supports_agent (void)
6236{
6237 return 1;
6238}
6239
6240static int
6241linux_supports_range_stepping (void)
6242{
6243 if (*the_low_target.supports_range_stepping == NULL)
6244 return 0;
6245
6246 return (*the_low_target.supports_range_stepping) ();
6247}
6248
6249/* Enumerate spufs IDs for process PID. */
6250static int
6251spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6252{
6253 int pos = 0;
6254 int written = 0;
6255 char path[128];
6256 DIR *dir;
6257 struct dirent *entry;
6258
6259 sprintf (path, "/proc/%ld/fd", pid);
6260 dir = opendir (path);
6261 if (!dir)
6262 return -1;
6263
6264 rewinddir (dir);
6265 while ((entry = readdir (dir)) != NULL)
6266 {
6267 struct stat st;
6268 struct statfs stfs;
6269 int fd;
6270
6271 fd = atoi (entry->d_name);
6272 if (!fd)
6273 continue;
6274
6275 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6276 if (stat (path, &st) != 0)
6277 continue;
6278 if (!S_ISDIR (st.st_mode))
6279 continue;
6280
6281 if (statfs (path, &stfs) != 0)
6282 continue;
6283 if (stfs.f_type != SPUFS_MAGIC)
6284 continue;
6285
6286 if (pos >= offset && pos + 4 <= offset + len)
6287 {
6288 *(unsigned int *)(buf + pos - offset) = fd;
6289 written += 4;
6290 }
6291 pos += 4;
6292 }
6293
6294 closedir (dir);
6295 return written;
6296}
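/* The spufs test above boils down to comparing statfs(2)'s f_type
   against the filesystem magic.  A standalone sketch with a
   hypothetical helper name (not from this file; disabled from the
   build):  */
#if 0
#include <sys/vfs.h>

/* Return 1 if PATH lives on a spufs mount, else 0.  */
static int
is_spufs (const char *path)
{
  struct statfs stfs;

  return statfs (path, &stfs) == 0 && stfs.f_type == SPUFS_MAGIC;
}
#endif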
6297
6298/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6299 object type, using the /proc file system. */
6300static int
6301linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6302 unsigned const char *writebuf,
6303 CORE_ADDR offset, int len)
6304{
6305 long pid = lwpid_of (current_thread);
6306 char buf[128];
6307 int fd = 0;
6308 int ret = 0;
6309
6310 if (!writebuf && !readbuf)
6311 return -1;
6312
6313 if (!*annex)
6314 {
6315 if (!readbuf)
6316 return -1;
6317 else
6318 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6319 }
6320
6321 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6322 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6323 if (fd <= 0)
6324 return -1;
6325
6326 if (offset != 0
6327 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6328 {
6329 close (fd);
6330 return 0;
6331 }
6332
6333 if (writebuf)
6334 ret = write (fd, writebuf, (size_t) len);
6335 else
6336 ret = read (fd, readbuf, (size_t) len);
6337
6338 close (fd);
6339 return ret;
6340}
6341
6342#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6343struct target_loadseg
6344{
6345 /* Core address to which the segment is mapped. */
6346 Elf32_Addr addr;
6347 /* VMA recorded in the program header. */
6348 Elf32_Addr p_vaddr;
6349 /* Size of this segment in memory. */
6350 Elf32_Word p_memsz;
6351};
6352
6353# if defined PT_GETDSBT
6354struct target_loadmap
6355{
6356 /* Protocol version number, must be zero. */
6357 Elf32_Word version;
6358 /* Pointer to the DSBT table, its size, and the DSBT index. */
6359 unsigned *dsbt_table;
6360 unsigned dsbt_size, dsbt_index;
6361 /* Number of segments in this map. */
6362 Elf32_Word nsegs;
6363 /* The actual memory map. */
6364 struct target_loadseg segs[/*nsegs*/];
6365};
6366# define LINUX_LOADMAP PT_GETDSBT
6367# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6368# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6369# else
6370struct target_loadmap
6371{
6372 /* Protocol version number, must be zero. */
6373 Elf32_Half version;
6374 /* Number of segments in this map. */
6375 Elf32_Half nsegs;
6376 /* The actual memory map. */
6377 struct target_loadseg segs[/*nsegs*/];
6378};
6379# define LINUX_LOADMAP PTRACE_GETFDPIC
6380# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6381# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6382# endif
6383
6384static int
6385linux_read_loadmap (const char *annex, CORE_ADDR offset,
6386 unsigned char *myaddr, unsigned int len)
6387{
6388 int pid = lwpid_of (current_thread);
6389 int addr = -1;
6390 struct target_loadmap *data = NULL;
6391 unsigned int actual_length, copy_length;
6392
6393 if (strcmp (annex, "exec") == 0)
6394 addr = (int) LINUX_LOADMAP_EXEC;
6395 else if (strcmp (annex, "interp") == 0)
6396 addr = (int) LINUX_LOADMAP_INTERP;
6397 else
6398 return -1;
6399
6400 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6401 return -1;
6402
6403 if (data == NULL)
6404 return -1;
6405
6406 actual_length = sizeof (struct target_loadmap)
6407 + sizeof (struct target_loadseg) * data->nsegs;
6408
6409 if (offset < 0 || offset > actual_length)
6410 return -1;
6411
6412 copy_length = actual_length - offset < len ? actual_length - offset : len;
6413 memcpy (myaddr, (char *) data + offset, copy_length);
6414 return copy_length;
6415}
6416#else
6417# define linux_read_loadmap NULL
6418#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6419
6420static void
6421linux_process_qsupported (char **features, int count)
6422{
6423 if (the_low_target.process_qsupported != NULL)
6424 the_low_target.process_qsupported (features, count);
6425}
6426
6427static int
6428linux_supports_catch_syscall (void)
6429{
6430 return (the_low_target.get_syscall_trapinfo != NULL
6431 && linux_supports_tracesysgood ());
6432}
6433
6434static int
6435linux_supports_tracepoints (void)
6436{
6437 if (*the_low_target.supports_tracepoints == NULL)
6438 return 0;
6439
6440 return (*the_low_target.supports_tracepoints) ();
6441}
6442
6443static CORE_ADDR
6444linux_read_pc (struct regcache *regcache)
6445{
6446 if (the_low_target.get_pc == NULL)
6447 return 0;
6448
6449 return (*the_low_target.get_pc) (regcache);
6450}
6451
6452static void
6453linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6454{
6455 gdb_assert (the_low_target.set_pc != NULL);
6456
6457 (*the_low_target.set_pc) (regcache, pc);
6458}
6459
6460static int
6461linux_thread_stopped (struct thread_info *thread)
6462{
6463 return get_thread_lwp (thread)->stopped;
6464}
6465
6466/* This exposes stop-all-threads functionality to other modules. */
6467
6468static void
6469linux_pause_all (int freeze)
6470{
6471 stop_all_lwps (freeze, NULL);
6472}
6473
6474/* This exposes unstop-all-threads functionality to other gdbserver
6475 modules. */
6476
6477static void
6478linux_unpause_all (int unfreeze)
6479{
6480 unstop_all_lwps (unfreeze, NULL);
6481}
6482
6483static int
6484linux_prepare_to_access_memory (void)
6485{
6486 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6487 running LWP. */
6488 if (non_stop)
6489 linux_pause_all (1);
6490 return 0;
6491}
6492
6493static void
6494linux_done_accessing_memory (void)
6495{
6496 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6497 running LWP. */
6498 if (non_stop)
6499 linux_unpause_all (1);
6500}
6501
6502static int
6503linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6504 CORE_ADDR collector,
6505 CORE_ADDR lockaddr,
6506 ULONGEST orig_size,
6507 CORE_ADDR *jump_entry,
6508 CORE_ADDR *trampoline,
6509 ULONGEST *trampoline_size,
6510 unsigned char *jjump_pad_insn,
6511 ULONGEST *jjump_pad_insn_size,
6512 CORE_ADDR *adjusted_insn_addr,
6513 CORE_ADDR *adjusted_insn_addr_end,
6514 char *err)
6515{
6516 return (*the_low_target.install_fast_tracepoint_jump_pad)
6517 (tpoint, tpaddr, collector, lockaddr, orig_size,
6518 jump_entry, trampoline, trampoline_size,
6519 jjump_pad_insn, jjump_pad_insn_size,
6520 adjusted_insn_addr, adjusted_insn_addr_end,
6521 err);
6522}
6523
6524static struct emit_ops *
6525linux_emit_ops (void)
6526{
6527 if (the_low_target.emit_ops != NULL)
6528 return (*the_low_target.emit_ops) ();
6529 else
6530 return NULL;
6531}
6532
6533static int
6534linux_get_min_fast_tracepoint_insn_len (void)
6535{
6536 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6537}
6538
6539/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6540
6541static int
6542get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6543 CORE_ADDR *phdr_memaddr, int *num_phdr)
6544{
6545 char filename[PATH_MAX];
6546 int fd;
6547 const int auxv_size = is_elf64
6548 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6549 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6550
6551 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6552
6553 fd = open (filename, O_RDONLY);
6554 if (fd < 0)
6555 return 1;
6556
6557 *phdr_memaddr = 0;
6558 *num_phdr = 0;
6559 while (read (fd, buf, auxv_size) == auxv_size
6560 && (*phdr_memaddr == 0 || *num_phdr == 0))
6561 {
6562 if (is_elf64)
6563 {
6564 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6565
6566 switch (aux->a_type)
6567 {
6568 case AT_PHDR:
6569 *phdr_memaddr = aux->a_un.a_val;
6570 break;
6571 case AT_PHNUM:
6572 *num_phdr = aux->a_un.a_val;
6573 break;
6574 }
6575 }
6576 else
6577 {
6578 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6579
6580 switch (aux->a_type)
6581 {
6582 case AT_PHDR:
6583 *phdr_memaddr = aux->a_un.a_val;
6584 break;
6585 case AT_PHNUM:
6586 *num_phdr = aux->a_un.a_val;
6587 break;
6588 }
6589 }
6590 }
6591
6592 close (fd);
6593
6594 if (*phdr_memaddr == 0 || *num_phdr == 0)
6595 {
6596 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6597 "phdr_memaddr = %ld, phdr_num = %d",
6598 (long) *phdr_memaddr, *num_phdr);
6599 return 2;
6600 }
6601
6602 return 0;
6603}
6604
6605/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6606
6607static CORE_ADDR
6608get_dynamic (const int pid, const int is_elf64)
6609{
6610 CORE_ADDR phdr_memaddr, relocation;
6611 int num_phdr, i;
6612 unsigned char *phdr_buf;
6613 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6614
6615 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6616 return 0;
6617
6618 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6619 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6620
6621 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6622 return 0;
6623
6624 /* Compute relocation: it is expected to be 0 for "regular" executables,
6625 non-zero for PIE ones. */
6626 relocation = -1;
6627 for (i = 0; relocation == -1 && i < num_phdr; i++)
6628 if (is_elf64)
6629 {
6630 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6631
6632 if (p->p_type == PT_PHDR)
6633 relocation = phdr_memaddr - p->p_vaddr;
6634 }
6635 else
6636 {
6637 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6638
6639 if (p->p_type == PT_PHDR)
6640 relocation = phdr_memaddr - p->p_vaddr;
6641 }
6642
6643 if (relocation == -1)
6644 {
6645 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6646 real-world executables, including PIE executables, always have
6647 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6648 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6649 provides DT_DEBUG anyway (fpc binaries are statically linked).
6650
6651 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
6652
6653 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6654
6655 return 0;
6656 }
6657
6658 for (i = 0; i < num_phdr; i++)
6659 {
6660 if (is_elf64)
6661 {
6662 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6663
6664 if (p->p_type == PT_DYNAMIC)
6665 return p->p_vaddr + relocation;
6666 }
6667 else
6668 {
6669 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6670
6671 if (p->p_type == PT_DYNAMIC)
6672 return p->p_vaddr + relocation;
6673 }
6674 }
6675
6676 return 0;
6677}
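/* The relocation computed above is the PIE load bias: the difference
   between where the program headers actually sit in memory (auxv's
   AT_PHDR) and the p_vaddr recorded in PT_PHDR.  A worked example with
   hypothetical addresses (not from this file; disabled from the
   build):  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long at_phdr = 0x555555554040;	/* from auxv AT_PHDR */
  unsigned long phdr_vaddr = 0x40;		/* PT_PHDR p_vaddr */
  unsigned long dynamic_vaddr = 0x2e28;		/* PT_DYNAMIC p_vaddr */
  unsigned long bias = at_phdr - phdr_vaddr;	/* 0x555555554000 */

  printf ("&_DYNAMIC = %#lx\n", dynamic_vaddr + bias);
  return 0;
}
#endif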
6678
6679/* Return &_r_debug in the inferior, or -1 if not present. Return value
6680 can be 0 if the inferior does not yet have the library list initialized.
6681 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6682 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6683
6684static CORE_ADDR
6685get_r_debug (const int pid, const int is_elf64)
6686{
6687 CORE_ADDR dynamic_memaddr;
6688 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6689 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6690 CORE_ADDR map = -1;
6691
6692 dynamic_memaddr = get_dynamic (pid, is_elf64);
6693 if (dynamic_memaddr == 0)
6694 return map;
6695
6696 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6697 {
6698 if (is_elf64)
6699 {
6700 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6701#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6702 union
6703 {
6704 Elf64_Xword map;
6705 unsigned char buf[sizeof (Elf64_Xword)];
6706 }
6707 rld_map;
6708#endif
6709#ifdef DT_MIPS_RLD_MAP
6710 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6711 {
6712 if (linux_read_memory (dyn->d_un.d_val,
6713 rld_map.buf, sizeof (rld_map.buf)) == 0)
6714 return rld_map.map;
6715 else
6716 break;
6717 }
6718#endif /* DT_MIPS_RLD_MAP */
6719#ifdef DT_MIPS_RLD_MAP_REL
6720 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6721 {
6722 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6723 rld_map.buf, sizeof (rld_map.buf)) == 0)
6724 return rld_map.map;
6725 else
6726 break;
6727 }
6728#endif /* DT_MIPS_RLD_MAP_REL */
6729
6730 if (dyn->d_tag == DT_DEBUG && map == -1)
6731 map = dyn->d_un.d_val;
6732
6733 if (dyn->d_tag == DT_NULL)
6734 break;
6735 }
6736 else
6737 {
6738 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6739#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6740 union
6741 {
6742 Elf32_Word map;
6743 unsigned char buf[sizeof (Elf32_Word)];
6744 }
6745 rld_map;
6746#endif
6747#ifdef DT_MIPS_RLD_MAP
6748 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6749 {
6750 if (linux_read_memory (dyn->d_un.d_val,
6751 rld_map.buf, sizeof (rld_map.buf)) == 0)
6752 return rld_map.map;
6753 else
6754 break;
6755 }
6756#endif /* DT_MIPS_RLD_MAP */
6757#ifdef DT_MIPS_RLD_MAP_REL
6758 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6759 {
6760 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6761 rld_map.buf, sizeof (rld_map.buf)) == 0)
6762 return rld_map.map;
6763 else
6764 break;
6765 }
6766#endif /* DT_MIPS_RLD_MAP_REL */
6767
6768 if (dyn->d_tag == DT_DEBUG && map == -1)
6769 map = dyn->d_un.d_val;
6770
6771 if (dyn->d_tag == DT_NULL)
6772 break;
6773 }
6774
6775 dynamic_memaddr += dyn_size;
6776 }
6777
6778 return map;
6779}
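/* What get_r_debug locates remotely, glibc exposes locally as the
   _r_debug variable declared in <link.h>; walking its r_map list yields
   the same link_map chain that linux_qxfer_libraries_svr4 below reads
   out of the inferior.  A minimal local sketch, assuming glibc (not
   from this file; disabled from the build):  */
#if 0
#include <link.h>
#include <stdio.h>

int
main (void)
{
  struct link_map *lm;

  for (lm = _r_debug.r_map; lm != NULL; lm = lm->l_next)
    printf ("l_addr=%#lx name=%s\n", (unsigned long) lm->l_addr,
	    lm->l_name[0] != '\0' ? lm->l_name : "(main executable)");
  return 0;
}
#endif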
6780
6781/* Read one pointer from MEMADDR in the inferior. */
6782
6783static int
6784read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6785{
6786 int ret;
6787
6788 /* Go through a union so this works on either big or little endian
6789 hosts, when the inferior's pointer size is smaller than the size
6790 of CORE_ADDR. It is assumed that the inferior's endianness is the
6791 same as the superior's. */
6792 union
6793 {
6794 CORE_ADDR core_addr;
6795 unsigned int ui;
6796 unsigned char uc;
6797 } addr;
6798
6799 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6800 if (ret == 0)
6801 {
6802 if (ptr_size == sizeof (CORE_ADDR))
6803 *ptr = addr.core_addr;
6804 else if (ptr_size == sizeof (unsigned int))
6805 *ptr = addr.ui;
6806 else
6807 gdb_assert_not_reached ("unhandled pointer size");
6808 }
6809 return ret;
6810}
6811
6812struct link_map_offsets
6813 {
6814 /* Offset and size of r_debug.r_version. */
6815 int r_version_offset;
6816
6817 /* Offset and size of r_debug.r_map. */
6818 int r_map_offset;
6819
6820 /* Offset to l_addr field in struct link_map. */
6821 int l_addr_offset;
6822
6823 /* Offset to l_name field in struct link_map. */
6824 int l_name_offset;
6825
6826 /* Offset to l_ld field in struct link_map. */
6827 int l_ld_offset;
6828
6829 /* Offset to l_next field in struct link_map. */
6830 int l_next_offset;
6831
6832 /* Offset to l_prev field in struct link_map. */
6833 int l_prev_offset;
6834 };
6835
6836/* Construct qXfer:libraries-svr4:read reply. */
6837
6838static int
6839linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6840 unsigned const char *writebuf,
6841 CORE_ADDR offset, int len)
6842{
6843 char *document;
6844 unsigned document_len;
6845 struct process_info_private *const priv = current_process ()->priv;
6846 char filename[PATH_MAX];
6847 int pid, is_elf64;
6848
6849 static const struct link_map_offsets lmo_32bit_offsets =
6850 {
6851 0, /* r_version offset. */
6852 4, /* r_debug.r_map offset. */
6853 0, /* l_addr offset in link_map. */
6854 4, /* l_name offset in link_map. */
6855 8, /* l_ld offset in link_map. */
6856 12, /* l_next offset in link_map. */
6857 16 /* l_prev offset in link_map. */
6858 };
6859
6860 static const struct link_map_offsets lmo_64bit_offsets =
6861 {
6862 0, /* r_version offset. */
6863 8, /* r_debug.r_map offset. */
6864 0, /* l_addr offset in link_map. */
6865 8, /* l_name offset in link_map. */
6866 16, /* l_ld offset in link_map. */
6867 24, /* l_next offset in link_map. */
6868 32 /* l_prev offset in link_map. */
6869 };
6870 const struct link_map_offsets *lmo;
6871 unsigned int machine;
6872 int ptr_size;
6873 CORE_ADDR lm_addr = 0, lm_prev = 0;
6874 int allocated = 1024;
6875 char *p;
6876 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6877 int header_done = 0;
6878
6879 if (writebuf != NULL)
6880 return -2;
6881 if (readbuf == NULL)
6882 return -1;
6883
6884 pid = lwpid_of (current_thread);
6885 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6886 is_elf64 = elf_64_file_p (filename, &machine);
6887 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6888 ptr_size = is_elf64 ? 8 : 4;
6889
6890 while (annex[0] != '\0')
6891 {
6892 const char *sep;
6893 CORE_ADDR *addrp;
6894 int len;
6895
6896 sep = strchr (annex, '=');
6897 if (sep == NULL)
6898 break;
6899
6900 len = sep - annex;
6901 if (len == 5 && startswith (annex, "start"))
6902 addrp = &lm_addr;
6903 else if (len == 4 && startswith (annex, "prev"))
6904 addrp = &lm_prev;
6905 else
6906 {
6907 annex = strchr (sep, ';');
6908 if (annex == NULL)
6909 break;
6910 annex++;
6911 continue;
6912 }
6913
6914 annex = decode_address_to_semicolon (addrp, sep + 1);
6915 }
6916
6917 if (lm_addr == 0)
6918 {
6919 int r_version = 0;
6920
6921 if (priv->r_debug == 0)
6922 priv->r_debug = get_r_debug (pid, is_elf64);
6923
6924 /* We failed to find DT_DEBUG. This situation will not change
6925 for this inferior, so do not retry. Report it to GDB as E01;
6926 see GDB's solib-svr4.c for the reasons. */
6927 if (priv->r_debug == (CORE_ADDR) -1)
6928 return -1;
6929
6930 if (priv->r_debug != 0)
6931 {
6932 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6933 (unsigned char *) &r_version,
6934 sizeof (r_version)) != 0
6935 || r_version != 1)
6936 {
6937 warning ("unexpected r_debug version %d", r_version);
6938 }
6939 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6940 &lm_addr, ptr_size) != 0)
6941 {
6942 warning ("unable to read r_map from 0x%lx",
6943 (long) priv->r_debug + lmo->r_map_offset);
6944 }
6945 }
6946 }
6947
6948 document = (char *) xmalloc (allocated);
6949 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6950 p = document + strlen (document);
6951
6952 while (lm_addr
6953 && read_one_ptr (lm_addr + lmo->l_name_offset,
6954 &l_name, ptr_size) == 0
6955 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6956 &l_addr, ptr_size) == 0
6957 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6958 &l_ld, ptr_size) == 0
6959 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6960 &l_prev, ptr_size) == 0
6961 && read_one_ptr (lm_addr + lmo->l_next_offset,
6962 &l_next, ptr_size) == 0)
6963 {
6964 unsigned char libname[PATH_MAX];
6965
6966 if (lm_prev != l_prev)
6967 {
6968 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6969 (long) lm_prev, (long) l_prev);
6970 break;
6971 }
6972
6973 /* Ignore the first entry even if it has a valid name, as it
6974 corresponds to the main executable. The first entry should not be
6975 skipped if the dynamic loader was loaded late by a static executable
6976 (see the solib-svr4.c parameter ignore_first). But in that case the
6977 main executable does not have PT_DYNAMIC present, and this function
6978 has already exited above due to a failed get_r_debug. */
6979 if (lm_prev == 0)
6980 {
6981 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6982 p = p + strlen (p);
6983 }
6984 else
6985 {
6986 /* Not checking for error because reading may stop before
6987 we've got PATH_MAX worth of characters. */
6988 libname[0] = '\0';
6989 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6990 libname[sizeof (libname) - 1] = '\0';
6991 if (libname[0] != '\0')
6992 {
6993 /* 6x the size for xml_escape_text below. */
6994 size_t len = 6 * strlen ((char *) libname);
6995 char *name;
6996
6997 if (!header_done)
6998 {
6999 /* Terminate `<library-list-svr4'. */
7000 *p++ = '>';
7001 header_done = 1;
7002 }
7003
7004 while (allocated < p - document + len + 200)
7005 {
7006 /* Expand to guarantee sufficient storage. */
7007 uintptr_t document_len = p - document;
7008
7009 document = (char *) xrealloc (document, 2 * allocated);
7010 allocated *= 2;
7011 p = document + document_len;
7012 }
7013
7014 name = xml_escape_text ((char *) libname);
7015 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7016 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7017 name, (unsigned long) lm_addr,
7018 (unsigned long) l_addr, (unsigned long) l_ld);
7019 free (name);
7020 }
7021 }
7022
7023 lm_prev = lm_addr;
7024 lm_addr = l_next;
7025 }
7026
7027 if (!header_done)
7028 {
7029 /* Empty list; terminate `<library-list-svr4'. */
7030 strcpy (p, "/>");
7031 }
7032 else
7033 strcpy (p, "</library-list-svr4>");
7034
7035 document_len = strlen (document);
7036 if (offset < document_len)
7037 document_len -= offset;
7038 else
7039 document_len = 0;
7040 if (len > document_len)
7041 len = document_len;
7042
7043 memcpy (readbuf, document + offset, len);
7044 xfree (document);
7045
7046 return len;
7047}
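/* For reference, a reply built by the function above looks roughly
   like the following (addresses hypothetical):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7fb5000"
   l_addr="0x7ffff7dd3000" l_ld="0x7ffff7f8bb80"/>
   </library-list-svr4>

   An empty list is simply <library-list-svr4 version="1.0"/>.  */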
7048
7049#ifdef HAVE_LINUX_BTRACE
7050
7051/* See to_disable_btrace target method. */
7052
7053static int
7054linux_low_disable_btrace (struct btrace_target_info *tinfo)
7055{
7056 enum btrace_error err;
7057
7058 err = linux_disable_btrace (tinfo);
7059 return (err == BTRACE_ERR_NONE ? 0 : -1);
7060}
7061
7062/* Encode an Intel Processor Trace configuration. */
7063
7064static void
7065linux_low_encode_pt_config (struct buffer *buffer,
7066 const struct btrace_data_pt_config *config)
7067{
7068 buffer_grow_str (buffer, "<pt-config>\n");
7069
7070 switch (config->cpu.vendor)
7071 {
7072 case CV_INTEL:
7073 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7074 "model=\"%u\" stepping=\"%u\"/>\n",
7075 config->cpu.family, config->cpu.model,
7076 config->cpu.stepping);
7077 break;
7078
7079 default:
7080 break;
7081 }
7082
7083 buffer_grow_str (buffer, "</pt-config>\n");
7084}
7085
7086/* Encode a raw buffer. */
7087
7088static void
7089linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7090 unsigned int size)
7091{
7092 if (size == 0)
7093 return;
7094
7095 /* We use hex encoding - see common/rsp-low.h. */
7096 buffer_grow_str (buffer, "<raw>\n");
7097
7098 while (size-- > 0)
7099 {
7100 char elem[2];
7101
7102 elem[0] = tohex ((*data >> 4) & 0xf);
7103 elem[1] = tohex (*data++ & 0xf);
7104
7105 buffer_grow (buffer, elem, 2);
7106 }
7107
7108 buffer_grow_str (buffer, "</raw>\n");
7109}
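/* The raw encoding above is plain two-character hex, high nibble
   first, as used throughout the remote protocol (see common/rsp-low.h).
   A standalone sketch with a hypothetical helper name (not from this
   file; disabled from the build):  */
#if 0
#include <stdio.h>

static const char hexchars[] = "0123456789abcdef";

/* Hex-encode SIZE bytes from DATA into OUT, which must hold
   2 * SIZE + 1 bytes.  */
static void
encode_hex (const unsigned char *data, unsigned int size, char *out)
{
  while (size-- > 0)
    {
      *out++ = hexchars[(*data >> 4) & 0xf];
      *out++ = hexchars[*data++ & 0xf];
    }
  *out = '\0';
}

int
main (void)
{
  char buf[16];

  encode_hex ((const unsigned char *) "\x0f\xa0", 2, buf);
  printf ("%s\n", buf);	/* prints 0fa0 */
  return 0;
}
#endif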
7110
7111/* See to_read_btrace target method. */
7112
7113static int
7114linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7115 enum btrace_read_type type)
7116{
7117 struct btrace_data btrace;
7118 struct btrace_block *block;
7119 enum btrace_error err;
7120 int i;
7121
7122 btrace_data_init (&btrace);
7123
7124 err = linux_read_btrace (&btrace, tinfo, type);
7125 if (err != BTRACE_ERR_NONE)
7126 {
7127 if (err == BTRACE_ERR_OVERFLOW)
7128 buffer_grow_str0 (buffer, "E.Overflow.");
7129 else
7130 buffer_grow_str0 (buffer, "E.Generic Error.");
7131
7132 goto err;
7133 }
7134
7135 switch (btrace.format)
7136 {
7137 case BTRACE_FORMAT_NONE:
7138 buffer_grow_str0 (buffer, "E.No Trace.");
7139 goto err;
7140
7141 case BTRACE_FORMAT_BTS:
7142 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7143 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7144
7145 for (i = 0;
7146 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7147 i++)
7148 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7149 paddress (block->begin), paddress (block->end));
7150
7151 buffer_grow_str0 (buffer, "</btrace>\n");
7152 break;
7153
7154 case BTRACE_FORMAT_PT:
7155 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7156 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7157 buffer_grow_str (buffer, "<pt>\n");
7158
7159 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7160
7161 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7162 btrace.variant.pt.size);
7163
7164 buffer_grow_str (buffer, "</pt>\n");
7165 buffer_grow_str0 (buffer, "</btrace>\n");
7166 break;
7167
7168 default:
7169 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7170 goto err;
7171 }
7172
7173 btrace_data_fini (&btrace);
7174 return 0;
7175
7176err:
7177 btrace_data_fini (&btrace);
7178 return -1;
7179}
7180
7181/* See to_btrace_conf target method. */
7182
7183static int
7184linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7185 struct buffer *buffer)
7186{
7187 const struct btrace_config *conf;
7188
7189 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7190 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7191
7192 conf = linux_btrace_conf (tinfo);
7193 if (conf != NULL)
7194 {
7195 switch (conf->format)
7196 {
7197 case BTRACE_FORMAT_NONE:
7198 break;
7199
7200 case BTRACE_FORMAT_BTS:
7201 buffer_xml_printf (buffer, "<bts");
7202 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7203 buffer_xml_printf (buffer, " />\n");
7204 break;
7205
7206 case BTRACE_FORMAT_PT:
7207 buffer_xml_printf (buffer, "<pt");
7208 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7209 buffer_xml_printf (buffer, "/>\n");
7210 break;
7211 }
7212 }
7213
7214 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7215 return 0;
7216}
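/* A configuration document produced by the function above looks like,
   e.g. (sizes hypothetical):

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <pt size="0x10000"/>
   </btrace-conf>  */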
7217#endif /* HAVE_LINUX_BTRACE */
7218
7219/* See nat/linux-nat.h. */
7220
7221ptid_t
7222current_lwp_ptid (void)
7223{
7224 return ptid_of (current_thread);
7225}
7226
7227/* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7228
7229static int
7230linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7231{
7232 if (the_low_target.breakpoint_kind_from_pc != NULL)
7233 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7234 else
7235 return default_breakpoint_kind_from_pc (pcptr);
7236}
7237
7238/* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7239
7240static const gdb_byte *
7241linux_sw_breakpoint_from_kind (int kind, int *size)
7242{
7243 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7244
7245 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7246}
7247
7248/* Implementation of the target_ops method
7249 "breakpoint_kind_from_current_state". */
7250
7251static int
7252linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7253{
7254 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7255 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7256 else
7257 return linux_breakpoint_kind_from_pc (pcptr);
7258}
7259
7260/* Default implementation of linux_target_ops method "set_pc" for
7261 32-bit pc register which is literally named "pc". */
7262
7263void
7264linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7265{
7266 uint32_t newpc = pc;
7267
7268 supply_register_by_name (regcache, "pc", &newpc);
7269}
7270
7271/* Default implementation of linux_target_ops method "get_pc" for
7272 32-bit pc register which is literally named "pc". */
7273
7274CORE_ADDR
7275linux_get_pc_32bit (struct regcache *regcache)
7276{
7277 uint32_t pc;
7278
7279 collect_register_by_name (regcache, "pc", &pc);
7280 if (debug_threads)
7281 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7282 return pc;
7283}
7284
7285/* Default implementation of linux_target_ops method "set_pc" for
7286 64-bit pc register which is literally named "pc". */
7287
7288void
7289linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7290{
7291 uint64_t newpc = pc;
7292
7293 supply_register_by_name (regcache, "pc", &newpc);
7294}
7295
7296/* Default implementation of linux_target_ops method "get_pc" for
7297 64-bit pc register which is literally named "pc". */
7298
7299CORE_ADDR
7300linux_get_pc_64bit (struct regcache *regcache)
7301{
7302 uint64_t pc;
7303
7304 collect_register_by_name (regcache, "pc", &pc);
7305 if (debug_threads)
7306 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7307 return pc;
7308}
7309
7310
7311static struct target_ops linux_target_ops = {
7312 linux_create_inferior,
7313 linux_post_create_inferior,
7314 linux_attach,
7315 linux_kill,
7316 linux_detach,
7317 linux_mourn,
7318 linux_join,
7319 linux_thread_alive,
7320 linux_resume,
7321 linux_wait,
7322 linux_fetch_registers,
7323 linux_store_registers,
7324 linux_prepare_to_access_memory,
7325 linux_done_accessing_memory,
7326 linux_read_memory,
7327 linux_write_memory,
7328 linux_look_up_symbols,
7329 linux_request_interrupt,
7330 linux_read_auxv,
7331 linux_supports_z_point_type,
7332 linux_insert_point,
7333 linux_remove_point,
7334 linux_stopped_by_sw_breakpoint,
7335 linux_supports_stopped_by_sw_breakpoint,
7336 linux_stopped_by_hw_breakpoint,
7337 linux_supports_stopped_by_hw_breakpoint,
7338 linux_supports_hardware_single_step,
7339 linux_stopped_by_watchpoint,
7340 linux_stopped_data_address,
7341#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7342 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7343 && defined(PT_TEXT_END_ADDR)
7344 linux_read_offsets,
7345#else
7346 NULL,
7347#endif
7348#ifdef USE_THREAD_DB
7349 thread_db_get_tls_address,
7350#else
7351 NULL,
7352#endif
7353 linux_qxfer_spu,
7354 hostio_last_error_from_errno,
7355 linux_qxfer_osdata,
7356 linux_xfer_siginfo,
7357 linux_supports_non_stop,
7358 linux_async,
7359 linux_start_non_stop,
7360 linux_supports_multi_process,
7361 linux_supports_fork_events,
7362 linux_supports_vfork_events,
7363 linux_supports_exec_events,
7364 linux_handle_new_gdb_connection,
7365#ifdef USE_THREAD_DB
7366 thread_db_handle_monitor_command,
7367#else
7368 NULL,
7369#endif
7370 linux_common_core_of_thread,
7371 linux_read_loadmap,
7372 linux_process_qsupported,
7373 linux_supports_tracepoints,
7374 linux_read_pc,
7375 linux_write_pc,
7376 linux_thread_stopped,
7377 NULL,
7378 linux_pause_all,
7379 linux_unpause_all,
7380 linux_stabilize_threads,
7381 linux_install_fast_tracepoint_jump_pad,
7382 linux_emit_ops,
7383 linux_supports_disable_randomization,
7384 linux_get_min_fast_tracepoint_insn_len,
7385 linux_qxfer_libraries_svr4,
7386 linux_supports_agent,
7387#ifdef HAVE_LINUX_BTRACE
7388 linux_supports_btrace,
7389 linux_enable_btrace,
7390 linux_low_disable_btrace,
7391 linux_low_read_btrace,
7392 linux_low_btrace_conf,
7393#else
7394 NULL,
7395 NULL,
7396 NULL,
7397 NULL,
7398 NULL,
7399#endif
7400 linux_supports_range_stepping,
7401 linux_proc_pid_to_exec_file,
7402 linux_mntns_open_cloexec,
7403 linux_mntns_unlink,
7404 linux_mntns_readlink,
7405 linux_breakpoint_kind_from_pc,
7406 linux_sw_breakpoint_from_kind,
7407 linux_proc_tid_get_name,
7408 linux_breakpoint_kind_from_current_state,
7409 linux_supports_software_single_step,
7410 linux_supports_catch_syscall,
7411};
7412
7413#ifdef HAVE_LINUX_REGSETS
7414void
7415initialize_regsets_info (struct regsets_info *info)
7416{
7417 for (info->num_regsets = 0;
7418 info->regsets[info->num_regsets].size >= 0;
7419 info->num_regsets++)
7420 ;
7421}
7422#endif
7423
7424void
7425initialize_low (void)
7426{
7427 struct sigaction sigchld_action;
7428
7429 memset (&sigchld_action, 0, sizeof (sigchld_action));
7430 set_target_ops (&linux_target_ops);
7431
7432 linux_ptrace_init_warnings ();
7433
7434 sigchld_action.sa_handler = sigchld_handler;
7435 sigemptyset (&sigchld_action.sa_mask);
7436 sigchld_action.sa_flags = SA_RESTART;
7437 sigaction (SIGCHLD, &sigchld_action, NULL);
7438
7439 initialize_low_arch ();
7440
7441 linux_check_ptrace_features ();
7442}