[gdbserver] Split a new hostio.h file out of server.h.
gdb/gdbserver/linux-low.c
1/* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19#include "server.h"
20#include "linux-low.h"
21#include "linux-osdata.h"
22#include "agent.h"
23
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
26#include "gdb_wait.h"
27#include <stdio.h>
28#include <sys/ptrace.h>
29#include "linux-ptrace.h"
30#include "linux-procfs.h"
31#include <signal.h>
32#include <sys/ioctl.h>
33#include <fcntl.h>
34#include <string.h>
35#include <stdlib.h>
36#include <unistd.h>
37#include <errno.h>
38#include <sys/syscall.h>
39#include <sched.h>
40#include <ctype.h>
41#include <pwd.h>
42#include <sys/types.h>
43#include <dirent.h>
44#include "gdb_stat.h"
45#include <sys/vfs.h>
46#include <sys/uio.h>
47#include "filestuff.h"
48#include "tracepoint.h"
49#include "hostio.h"
50#ifndef ELFMAG0
51/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55#include <elf.h>
56#endif
57
58#ifndef SPUFS_MAGIC
59#define SPUFS_MAGIC 0x23c9b64e
60#endif
61
62#ifdef HAVE_PERSONALITY
63# include <sys/personality.h>
64# if !HAVE_DECL_ADDR_NO_RANDOMIZE
65# define ADDR_NO_RANDOMIZE 0x0040000
66# endif
67#endif
68
69#ifndef O_LARGEFILE
70#define O_LARGEFILE 0
71#endif
72
73#ifndef W_STOPCODE
74#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75#endif
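/* A minimal sketch (disabled; not part of the build) of how W_STOPCODE
   round-trips through the standard wait-status macros.  gdbserver
   relies on this when it synthesizes a status for a deferred signal
   (see dequeue_one_deferred_signal below).  */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <assert.h>

static void
w_stopcode_example (void)
{
  int status = W_STOPCODE (SIGSTOP);	/* 0x137f: 0x7f in the low byte
					   marks "stopped"; the signal
					   sits in bits 8-15.  */

  assert (WIFSTOPPED (status));
  assert (WSTOPSIG (status) == SIGSTOP);
}
#endif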
76
77/* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79#ifndef __SIGRTMIN
80#define __SIGRTMIN 32
81#endif
82
83/* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86#if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89#if defined(__mcoldfire__)
90/* These are still undefined in 3.10 kernels. */
91#define PT_TEXT_ADDR 49*4
92#define PT_DATA_ADDR 50*4
93#define PT_TEXT_END_ADDR 51*4
94/* BFIN has defined these since at least the 2.6.32 kernels. */
95#elif defined(BFIN)
96#define PT_TEXT_ADDR 220
97#define PT_TEXT_END_ADDR 224
98#define PT_DATA_ADDR 228
99/* These are still undefined in 3.10 kernels. */
100#elif defined(__TMS320C6X__)
101#define PT_TEXT_ADDR (0x10000*4)
102#define PT_DATA_ADDR (0x10004*4)
103#define PT_TEXT_END_ADDR (0x10008*4)
104#endif
105#endif
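/* These offsets index the tracee's USER area.  On no-MMU targets
   gdbserver reads them with PTRACE_PEEKUSER to learn where the
   executable's text and data segments were actually loaded.  A
   minimal sketch, assuming a target that defines PT_TEXT_ADDR
   (disabled; illustration only):  */
#if 0
#include <sys/ptrace.h>
#include <errno.h>

static long
read_text_addr_example (int pid)
{
  /* A valid peeked word can itself be -1, so errno distinguishes
     failure from data.  */
  errno = 0;
  return ptrace (PTRACE_PEEKUSER, pid, (void *) PT_TEXT_ADDR, (void *) 0);
}
#endif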
106
107#ifdef HAVE_LINUX_BTRACE
108# include "linux-btrace.h"
109#endif
110
111#ifndef HAVE_ELF32_AUXV_T
112/* Copied from glibc's elf.h. */
113typedef struct
114{
115 uint32_t a_type; /* Entry type */
116 union
117 {
118 uint32_t a_val; /* Integer value */
119 /* We used to have pointer elements added here. We cannot do that,
120 though, since it does not work when using 32-bit definitions
121 on 64-bit platforms and vice versa. */
122 } a_un;
123} Elf32_auxv_t;
124#endif
125
126#ifndef HAVE_ELF64_AUXV_T
127/* Copied from glibc's elf.h. */
128typedef struct
129{
130 uint64_t a_type; /* Entry type */
131 union
132 {
133 uint64_t a_val; /* Integer value */
134 /* We used to have pointer elements added here. We cannot do that,
135 though, since it does not work when using 32-bit definitions
136 on 64-bit platforms and vice versa. */
137 } a_un;
138} Elf64_auxv_t;
139#endif
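/* A sketch of how these auxv record types are consumed:
   /proc/PID/auxv is a flat array of such records terminated by an
   AT_NULL entry.  The union deliberately has no pointer member, so
   the record layout is identical whether the reader is 32-bit or
   64-bit.  (Disabled; illustration only.)  */
#if 0
#include <elf.h>
#include <stdio.h>

static unsigned long
read_auxv_entry_example (const char *path, unsigned long type)
{
  FILE *f = fopen (path, "rb");	/* e.g. "/proc/self/auxv" */
  Elf64_auxv_t at;
  unsigned long val = 0;

  if (f == NULL)
    return 0;
  while (fread (&at, sizeof (at), 1, f) == 1 && at.a_type != AT_NULL)
    if (at.a_type == type)	/* e.g. AT_PHDR, AT_ENTRY */
      {
	val = at.a_un.a_val;
	break;
      }
  fclose (f);
  return val;
}
#endif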
140
141/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
142 representation of the thread ID.
143
144 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
145 the same as the LWP ID.
146
147 ``all_processes'' is keyed by the "overall process ID", which
148 GNU/Linux calls tgid, "thread group ID". */
149
150struct inferior_list all_lwps;
151
152/* A list of all unknown processes which receive stop signals. Some
153 other process will presumably claim each of these as forked
154 children momentarily. */
155
156struct simple_pid_list
157{
158 /* The process ID. */
159 int pid;
160
161 /* The status as reported by waitpid. */
162 int status;
163
164 /* Next in chain. */
165 struct simple_pid_list *next;
166};
167struct simple_pid_list *stopped_pids;
168
169/* Trivial list manipulation functions to keep track of a list of new
170 stopped processes. */
171
172static void
173add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
174{
175 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
176
177 new_pid->pid = pid;
178 new_pid->status = status;
179 new_pid->next = *listp;
180 *listp = new_pid;
181}
182
183static int
184pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
185{
186 struct simple_pid_list **p;
187
188 for (p = listp; *p != NULL; p = &(*p)->next)
189 if ((*p)->pid == pid)
190 {
191 struct simple_pid_list *next = (*p)->next;
192
193 *statusp = (*p)->status;
194 xfree (*p);
195 *p = next;
196 return 1;
197 }
198 return 0;
199}
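/* A minimal usage sketch for the two helpers above (disabled;
   illustration only): record a stop seen for an unknown PID, then
   claim it once the clone event that explains it arrives.  */
#if 0
static void
pid_list_example (void)
{
  struct simple_pid_list *list = NULL;
  int status;

  add_to_pid_list (&list, 1234, W_STOPCODE (SIGSTOP));

  if (pull_pid_from_list (&list, 1234, &status))
    {
      /* STATUS now holds the saved waitpid status; the node was
	 unlinked and freed by pull_pid_from_list.  */
    }
}
#endif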
200
201enum stopping_threads_kind
202 {
203 /* Not stopping threads presently. */
204 NOT_STOPPING_THREADS,
205
206 /* Stopping threads. */
207 STOPPING_THREADS,
208
209 /* Stopping and suspending threads. */
210 STOPPING_AND_SUSPENDING_THREADS
211 };
212
213/* This is set while stop_all_lwps is in effect. */
214enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
215
216/* FIXME make into a target method? */
217int using_threads = 1;
218
219/* True if we're presently stabilizing threads (moving them out of
220 jump pads). */
221static int stabilizing_threads;
222
223static void linux_resume_one_lwp (struct lwp_info *lwp,
224 int step, int signal, siginfo_t *info);
225static void linux_resume (struct thread_resume *resume_info, size_t n);
226static void stop_all_lwps (int suspend, struct lwp_info *except);
227static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
228static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
229static void *add_lwp (ptid_t ptid);
230static int linux_stopped_by_watchpoint (void);
231static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
232static void proceed_all_lwps (void);
233static int finish_step_over (struct lwp_info *lwp);
234static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
235static int kill_lwp (unsigned long lwpid, int signo);
236
237/* True if the low target can hardware single-step. Such targets
238 don't need a BREAKPOINT_REINSERT_ADDR callback. */
239
240static int
241can_hardware_single_step (void)
242{
243 return (the_low_target.breakpoint_reinsert_addr == NULL);
244}
245
246/* True if the low target supports memory breakpoints. If so, we'll
247 have a GET_PC implementation. */
248
249static int
250supports_breakpoints (void)
251{
252 return (the_low_target.get_pc != NULL);
253}
254
255/* Returns true if this target can support fast tracepoints. This
256 does not mean that the in-process agent has been loaded in the
257 inferior. */
258
259static int
260supports_fast_tracepoints (void)
261{
262 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
263}
264
265/* True if LWP is stopped in its stepping range. */
266
267static int
268lwp_in_step_range (struct lwp_info *lwp)
269{
270 CORE_ADDR pc = lwp->stop_pc;
271
272 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
273}
274
275struct pending_signals
276{
277 int signal;
278 siginfo_t info;
279 struct pending_signals *prev;
280};
281
282/* The read/write ends of the pipe registered as a waitable file
283 descriptor in the event loop. */
284static int linux_event_pipe[2] = { -1, -1 };
285
286/* True if we're currently in async mode. */
287#define target_is_async_p() (linux_event_pipe[0] != -1)
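/* A pipe like this typically implements the classic self-pipe trick:
   a SIGCHLD handler writes one byte to the write end, and the event
   loop watches the read end like any other file descriptor.  A
   minimal setup sketch, assuming POSIX pipe/fcntl (disabled;
   illustration only):  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
setup_event_pipe_example (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  /* Non-blocking, so a burst of SIGCHLDs cannot wedge the signal
     handler on a full pipe.  */
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif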
288
289static void send_sigstop (struct lwp_info *lwp);
290static void wait_for_sigstop (struct inferior_list_entry *entry);
291
292/* Return 1 if HEADER is a 64-bit ELF file, 0 if 32-bit ELF, and -1 if not ELF at all. */
293
294static int
295elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
296{
297 if (header->e_ident[EI_MAG0] == ELFMAG0
298 && header->e_ident[EI_MAG1] == ELFMAG1
299 && header->e_ident[EI_MAG2] == ELFMAG2
300 && header->e_ident[EI_MAG3] == ELFMAG3)
301 {
302 *machine = header->e_machine;
303 return header->e_ident[EI_CLASS] == ELFCLASS64;
304
305 }
306 *machine = EM_NONE;
307 return -1;
308}
309
310/* Return 1 if FILE is a 64-bit ELF file,
311 0 if the file is not a 64-bit ELF file,
312 and -1 if the file is not accessible or doesn't exist. */
313
314static int
315elf_64_file_p (const char *file, unsigned int *machine)
316{
317 Elf64_Ehdr header;
318 int fd;
319
320 fd = open (file, O_RDONLY);
321 if (fd < 0)
322 return -1;
323
324 if (read (fd, &header, sizeof (header)) != sizeof (header))
325 {
326 close (fd);
327 return 0;
328 }
329 close (fd);
330
331 return elf_64_header_p (&header, machine);
332}
333
334/* Accept an integer PID; return true if the executable that PID
335 is running is a 64-bit ELF file. */
336
337int
338linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
339{
340 char file[PATH_MAX];
341
342 sprintf (file, "/proc/%d/exe", pid);
343 return elf_64_file_p (file, machine);
344}
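/* A usage sketch for the predicate above (disabled; illustration
   only).  Note the return convention inherited from elf_64_file_p:
   1 means 64-bit ELF, 0 means a readable file that is not 64-bit
   ELF, and -1 means the file was not accessible (or not ELF).  */
#if 0
#include <stdio.h>

static void
elf_64_check_example (int pid)
{
  unsigned int machine;
  int res = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (res == 1)
    fprintf (stderr, "pid %d: 64-bit ELF, e_machine %u\n", pid, machine);
  else if (res == 0)
    fprintf (stderr, "pid %d: not a 64-bit ELF\n", pid);
  else
    fprintf (stderr, "pid %d: executable not accessible\n", pid);
}
#endif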
345
346static void
347delete_lwp (struct lwp_info *lwp)
348{
349 remove_thread (get_lwp_thread (lwp));
350 remove_inferior (&all_lwps, &lwp->head);
351 free (lwp->arch_private);
352 free (lwp);
353}
354
355/* Add a process to the common process list, and set its private
356 data. */
357
358static struct process_info *
359linux_add_process (int pid, int attached)
360{
361 struct process_info *proc;
362
363 proc = add_process (pid, attached);
364 proc->private = xcalloc (1, sizeof (*proc->private));
365
366 /* Set the arch when the first LWP stops. */
367 proc->private->new_inferior = 1;
368
369 if (the_low_target.new_process != NULL)
370 proc->private->arch_private = the_low_target.new_process ();
371
372 return proc;
373}
374
375/* Handle a GNU/Linux extended wait response. If we see a clone
376 event, we need to add the new LWP to our list (and not report the
377 trap to higher layers). */
378
379static void
380handle_extended_wait (struct lwp_info *event_child, int wstat)
381{
382 int event = wstat >> 16;
383 struct lwp_info *new_lwp;
384
385 if (event == PTRACE_EVENT_CLONE)
386 {
387 ptid_t ptid;
388 unsigned long new_pid;
389 int ret, status;
390
391 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
392 &new_pid);
393
394 /* If we haven't already seen the new PID stop, wait for it now. */
395 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
396 {
397 /* The new child has a pending SIGSTOP. We can't affect it until it
398 hits the SIGSTOP, but we're already attached. */
399
400 ret = my_waitpid (new_pid, &status, __WALL);
401
402 if (ret == -1)
403 perror_with_name ("waiting for new child");
404 else if (ret != new_pid)
405 warning ("wait returned unexpected PID %d", ret);
406 else if (!WIFSTOPPED (status))
407 warning ("wait returned unexpected status 0x%x", status);
408 }
409
410 ptid = ptid_build (pid_of (event_child), new_pid, 0);
411 new_lwp = (struct lwp_info *) add_lwp (ptid);
412 add_thread (ptid, new_lwp);
413
414 /* Either we're going to immediately resume the new thread
415 or leave it stopped. linux_resume_one_lwp is a nop if it
416 thinks the thread is currently running, so set this first
417 before calling linux_resume_one_lwp. */
418 new_lwp->stopped = 1;
419
420 /* If we're suspending all threads, leave this one suspended
421 too. */
422 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
423 new_lwp->suspended = 1;
424
425 /* Normally we will get the pending SIGSTOP. But in some cases
426 we might get another signal delivered to the group first.
427 If we do get another signal, be sure not to lose it. */
428 if (WSTOPSIG (status) == SIGSTOP)
429 {
430 if (stopping_threads != NOT_STOPPING_THREADS)
431 new_lwp->stop_pc = get_stop_pc (new_lwp);
432 else
433 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
434 }
435 else
436 {
437 new_lwp->stop_expected = 1;
438
439 if (stopping_threads != NOT_STOPPING_THREADS)
440 {
441 new_lwp->stop_pc = get_stop_pc (new_lwp);
442 new_lwp->status_pending_p = 1;
443 new_lwp->status_pending = status;
444 }
445 else
446 /* Pass the signal on. This is what GDB does - except
447 shouldn't we really report it instead? */
448 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
449 }
450
451 /* Always resume the current thread. If we are stopping
452 threads, it will have a pending SIGSTOP; we may as well
453 collect it now. */
454 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
455 }
456}
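/* For the clone event above to arrive at all, the LWP must have been
   flagged with PTRACE_O_TRACECLONE beforehand (done in this file via
   linux_enable_event_reporting once the LWP first stops).  A minimal
   sketch of setting that flag with raw ptrace (disabled; illustration
   only):  */
#if 0
#include <sys/ptrace.h>

static void
enable_clone_events_example (int lwpid)
{
  /* Afterwards, a clone(2) in the tracee stops it with SIGTRAP and
     PTRACE_EVENT_CLONE in bits 16-23 of the waitpid status, and
     PTRACE_GETEVENTMSG yields the new thread's LWP id.  */
  ptrace (PTRACE_SETOPTIONS, lwpid, (void *) 0,
	  (void *) PTRACE_O_TRACECLONE);
}
#endif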
457
458/* Return the PC as read from the regcache of LWP, without any
459 adjustment. */
460
461static CORE_ADDR
462get_pc (struct lwp_info *lwp)
463{
464 struct thread_info *saved_inferior;
465 struct regcache *regcache;
466 CORE_ADDR pc;
467
468 if (the_low_target.get_pc == NULL)
469 return 0;
470
471 saved_inferior = current_inferior;
472 current_inferior = get_lwp_thread (lwp);
473
474 regcache = get_thread_regcache (current_inferior, 1);
475 pc = (*the_low_target.get_pc) (regcache);
476
477 if (debug_threads)
478 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
479
480 current_inferior = saved_inferior;
481 return pc;
482}
483
484/* This function should only be called if LWP got a SIGTRAP.
485 The SIGTRAP could mean several things.
486
487 On i386, where decr_pc_after_break is non-zero:
488 If we were single-stepping this process using PTRACE_SINGLESTEP,
489 we will get only the one SIGTRAP (even if the instruction we
490 stepped over was a breakpoint). The value of $eip will be the
491 next instruction.
492 If we continue the process using PTRACE_CONT, we will get a
493 SIGTRAP when we hit a breakpoint. The value of $eip will be
494 the instruction after the breakpoint (i.e. needs to be
495 decremented). If we report the SIGTRAP to GDB, we must also
496 report the undecremented PC. If we cancel the SIGTRAP, we
497 must resume at the decremented PC.
498
499 (Presumably, not yet tested) On a non-decr_pc_after_break machine
500 with hardware or kernel single-step:
501 If we single-step over a breakpoint instruction, our PC will
502 point at the following instruction. If we continue and hit a
503 breakpoint instruction, our PC will point at the breakpoint
504 instruction. */
505
506static CORE_ADDR
507get_stop_pc (struct lwp_info *lwp)
508{
509 CORE_ADDR stop_pc;
510
511 if (the_low_target.get_pc == NULL)
512 return 0;
513
514 stop_pc = get_pc (lwp);
515
516 if (WSTOPSIG (lwp->last_status) == SIGTRAP
517 && !lwp->stepping
518 && !lwp->stopped_by_watchpoint
519 && lwp->last_status >> 16 == 0)
520 stop_pc -= the_low_target.decr_pc_after_break;
521
522 if (debug_threads)
523 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
524
525 return stop_pc;
526}
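/* A concrete instance of the adjustment above, assuming x86, where
   the software breakpoint is the one-byte 0xCC (int3) and
   the_low_target.decr_pc_after_break is 1: a breakpoint planted at
   ADDR reports PC == ADDR + 1 on the trap, and the stop PC we want
   to record (and resume at, if the trap is cancelled) is ADDR.
   (Disabled; illustration only.)  */
#if 0
static CORE_ADDR
x86_adjusted_stop_pc_example (CORE_ADDR reported_pc)
{
  return reported_pc - 1;	/* decr_pc_after_break on x86 */
}
#endif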
527
528static void *
529add_lwp (ptid_t ptid)
530{
531 struct lwp_info *lwp;
532
533 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
534 memset (lwp, 0, sizeof (*lwp));
535
536 lwp->head.id = ptid;
537
538 if (the_low_target.new_thread != NULL)
539 lwp->arch_private = the_low_target.new_thread ();
540
541 add_inferior_to_list (&all_lwps, &lwp->head);
542
543 return lwp;
544}
545
546/* Start an inferior process and return its pid.
547 ALLARGS is a vector of program-name and args. */
548
549static int
550linux_create_inferior (char *program, char **allargs)
551{
552#ifdef HAVE_PERSONALITY
553 int personality_orig = 0, personality_set = 0;
554#endif
555 struct lwp_info *new_lwp;
556 int pid;
557 ptid_t ptid;
558
559#ifdef HAVE_PERSONALITY
560 if (disable_randomization)
561 {
562 errno = 0;
563 personality_orig = personality (0xffffffff);
564 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
565 {
566 personality_set = 1;
567 personality (personality_orig | ADDR_NO_RANDOMIZE);
568 }
569 if (errno != 0 || (personality_set
570 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
571 warning ("Error disabling address space randomization: %s",
572 strerror (errno));
573 }
574#endif
575
576#if defined(__UCLIBC__) && defined(HAS_NOMMU)
577 pid = vfork ();
578#else
579 pid = fork ();
580#endif
581 if (pid < 0)
582 perror_with_name ("fork");
583
584 if (pid == 0)
585 {
586 close_most_fds ();
587 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
588
589#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
590 signal (__SIGRTMIN + 1, SIG_DFL);
591#endif
592
593 setpgid (0, 0);
594
595 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
596 stdout to stderr so that inferior i/o doesn't corrupt the connection.
597 Also, redirect stdin to /dev/null. */
598 if (remote_connection_is_stdio ())
599 {
600 close (0);
601 open ("/dev/null", O_RDONLY);
602 dup2 (2, 1);
603 if (write (2, "stdin/stdout redirected\n",
604 sizeof ("stdin/stdout redirected\n") - 1) < 0)
605 {
606 /* Errors ignored. */;
607 }
608 }
609
610 execv (program, allargs);
611 if (errno == ENOENT)
612 execvp (program, allargs);
613
614 fprintf (stderr, "Cannot exec %s: %s.\n", program,
615 strerror (errno));
616 fflush (stderr);
617 _exit (0177);
618 }
619
620#ifdef HAVE_PERSONALITY
621 if (personality_set)
622 {
623 errno = 0;
624 personality (personality_orig);
625 if (errno != 0)
626 warning ("Error restoring address space randomization: %s",
627 strerror (errno));
628 }
629#endif
630
631 linux_add_process (pid, 0);
632
633 ptid = ptid_build (pid, pid, 0);
634 new_lwp = add_lwp (ptid);
635 add_thread (ptid, new_lwp);
636 new_lwp->must_set_ptrace_flags = 1;
637
638 return pid;
639}
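/* On the gdbserver side the story continues with a wait: the child's
   execv above, running under PTRACE_TRACEME, stops it with SIGTRAP,
   which the tracer must collect before touching any registers.  A
   minimal sketch of that handshake (disabled; illustration only):  */
#if 0
#include <sys/wait.h>
#include <signal.h>

static int
wait_for_exec_stop_example (int pid)
{
  int status;

  if (waitpid (pid, &status, 0) != pid)
    return -1;

  /* Expect the SIGTRAP stop generated by exec under PTRACE_TRACEME.  */
  return (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP) ? 0 : -1;
}
#endif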
640
641/* Attach to an inferior process. */
642
643static void
644linux_attach_lwp_1 (unsigned long lwpid, int initial)
645{
646 ptid_t ptid;
647 struct lwp_info *new_lwp;
648
649 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
650 != 0)
651 {
652 struct buffer buffer;
653
654 if (!initial)
655 {
656 /* If we fail to attach to an LWP, just warn. */
657 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
658 strerror (errno), errno);
659 fflush (stderr);
660 return;
661 }
662
663 /* If we fail to attach to a process, report an error. */
664 buffer_init (&buffer);
665 linux_ptrace_attach_warnings (lwpid, &buffer);
666 buffer_grow_str0 (&buffer, "");
667 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
668 lwpid, strerror (errno), errno);
669 }
670
671 if (initial)
672 /* If lwp is the tgid, we handle adding existing threads later.
673 Otherwise we just add lwp without bothering about any other
674 threads. */
675 ptid = ptid_build (lwpid, lwpid, 0);
676 else
677 {
678 /* Note that extracting the pid from the current inferior is
679 safe, since we're always called in the context of the same
680 process as this new thread. */
681 int pid = pid_of (get_thread_lwp (current_inferior));
682 ptid = ptid_build (pid, lwpid, 0);
683 }
684
685 new_lwp = (struct lwp_info *) add_lwp (ptid);
686 add_thread (ptid, new_lwp);
687
688 /* We need to wait for SIGSTOP before being able to make the next
689 ptrace call on this LWP. */
690 new_lwp->must_set_ptrace_flags = 1;
691
692 if (linux_proc_pid_is_stopped (lwpid))
693 {
694 if (debug_threads)
695 fprintf (stderr,
696 "Attached to a stopped process\n");
697
698 /* The process is definitely stopped. It is in a job control
699 stop, unless the kernel predates the TASK_STOPPED /
700 TASK_TRACED distinction, in which case it might be in a
701 ptrace stop. Make sure it is in a ptrace stop; from there we
702 can kill it, signal it, et cetera.
703
704 First make sure there is a pending SIGSTOP. Since we are
705 already attached, the process can not transition from stopped
706 to running without a PTRACE_CONT; so we know this signal will
707 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
708 probably already in the queue (unless this kernel is old
709 enough to use TASK_STOPPED for ptrace stops); but since
710 SIGSTOP is not an RT signal, it can only be queued once. */
711 kill_lwp (lwpid, SIGSTOP);
712
713 /* Finally, resume the stopped process. This will deliver the
714 SIGSTOP (or a higher priority signal, just like normal
715 PTRACE_ATTACH), which we'll catch later on. */
716 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
717 }
718
719 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
720 brings it to a halt.
721
722 There are several cases to consider here:
723
724 1) gdbserver has already attached to the process and is being notified
725 of a new thread that is being created.
726 In this case we should ignore that SIGSTOP and resume the
727 process. This is handled below by setting stop_expected = 1,
728 and the fact that add_thread sets last_resume_kind ==
729 resume_continue.
730
731 2) This is the first thread (the process thread), and we're attaching
732 to it via attach_inferior.
733 In this case we want the process thread to stop.
734 This is handled by having linux_attach set last_resume_kind ==
735 resume_stop after we return.
736
737 If the pid we are attaching to is also the tgid, we attach to and
738 stop all the existing threads. Otherwise, we attach to pid and
739 ignore any other threads in the same group as this pid.
740
741 3) GDB is connecting to gdbserver and is requesting an enumeration of all
742 existing threads.
743 In this case we want the thread to stop.
744 FIXME: This case is currently not properly handled.
745 We should wait for the SIGSTOP but don't. Things work apparently
746 because enough time passes between when we ptrace (ATTACH) and when
747 gdb makes the next ptrace call on the thread.
748
749 On the other hand, if we are currently trying to stop all threads, we
750 should treat the new thread as if we had sent it a SIGSTOP. This works
751 because we are guaranteed that the add_lwp call above added us to the
752 end of the list, and so the new thread has not yet reached
753 wait_for_sigstop (but will). */
754 new_lwp->stop_expected = 1;
755}
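/* kill_lwp (declared near the top of this file) must direct a signal
   at one thread, not the whole group; plain kill(2) lets any thread
   in the group take it.  A minimal sketch of such a helper using the
   tgkill syscall -- an assumption for illustration, not necessarily
   how this file implements kill_lwp (disabled):  */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int
kill_lwp_example (int tgid, int tid, int signo)
{
  /* tgkill(2) delivers SIGNO to thread TID inside group TGID only.  */
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif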
756
757void
758linux_attach_lwp (unsigned long lwpid)
759{
760 linux_attach_lwp_1 (lwpid, 0);
761}
762
763/* Attach to PID. If PID is the tgid, attach to it and all
764 of its threads. */
765
766static int
767linux_attach (unsigned long pid)
768{
769 /* Attach to PID. We will check for other threads
770 soon. */
771 linux_attach_lwp_1 (pid, 1);
772 linux_add_process (pid, 1);
773
774 if (!non_stop)
775 {
776 struct thread_info *thread;
777
778 /* Don't ignore the initial SIGSTOP if we just attached to this
779 process. It will be collected by wait shortly. */
780 thread = find_thread_ptid (ptid_build (pid, pid, 0));
781 thread->last_resume_kind = resume_stop;
782 }
783
784 if (linux_proc_get_tgid (pid) == pid)
785 {
786 DIR *dir;
787 char pathname[128];
788
789 sprintf (pathname, "/proc/%ld/task", pid);
790
791 dir = opendir (pathname);
792
793 if (!dir)
794 {
795 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
796 fflush (stderr);
797 }
798 else
799 {
800 /* At this point we attached to the tgid. Scan the task for
801 existing threads. */
802 unsigned long lwp;
803 int new_threads_found;
804 int iterations = 0;
805 struct dirent *dp;
806
807 while (iterations < 2)
808 {
809 new_threads_found = 0;
810 /* Add all the other threads. While we go through the
811 threads, new threads may be spawned. Cycle through
812 the list of threads until we have done two iterations without
813 finding new threads. */
814 while ((dp = readdir (dir)) != NULL)
815 {
816 /* Fetch one lwp. */
817 lwp = strtoul (dp->d_name, NULL, 10);
818
819 /* Is this a new thread? */
820 if (lwp
821 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
822 {
823 linux_attach_lwp_1 (lwp, 0);
824 new_threads_found++;
825
826 if (debug_threads)
827 fprintf (stderr, "\
828Found and attached to new lwp %ld\n", lwp);
829 }
830 }
831
832 if (!new_threads_found)
833 iterations++;
834 else
835 iterations = 0;
836
837 rewinddir (dir);
838 }
839 closedir (dir);
840 }
841 }
842
843 return 0;
844}
845
846struct counter
847{
848 int pid;
849 int count;
850};
851
852static int
853second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
854{
855 struct counter *counter = args;
856
857 if (ptid_get_pid (entry->id) == counter->pid)
858 {
859 if (++counter->count > 1)
860 return 1;
861 }
862
863 return 0;
864}
865
866static int
867last_thread_of_process_p (struct thread_info *thread)
868{
869 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
870 int pid = ptid_get_pid (ptid);
871 struct counter counter = { pid, 0 };
872
873 return (find_inferior (&all_threads,
874 second_thread_of_pid_p, &counter) == NULL);
875}
876
877/* Kill LWP. */
878
879static void
880linux_kill_one_lwp (struct lwp_info *lwp)
881{
882 int pid = lwpid_of (lwp);
883
884 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
885 there is no signal context, and ptrace(PTRACE_KILL) (or
886 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
887 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
888 alternative is to kill with SIGKILL. We only need one SIGKILL
889 per process, not one for each thread. But since we still support
890 linuxthreads, and we also support debugging programs using raw
891 clone without CLONE_THREAD, we send one for each thread. For
892 years, we used PTRACE_KILL only, so we're being a bit paranoid
893 about some old kernels where PTRACE_KILL might work better
894 (dubious if there are any such, but that's why it's paranoia), so
895 we try SIGKILL first, PTRACE_KILL second, and so we're fine
896 everywhere. */
897
898 errno = 0;
899 kill (pid, SIGKILL);
900 if (debug_threads)
901 fprintf (stderr,
902 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
903 target_pid_to_str (ptid_of (lwp)),
904 errno ? strerror (errno) : "OK");
905
906 errno = 0;
907 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
908 if (debug_threads)
909 fprintf (stderr,
910 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
911 target_pid_to_str (ptid_of (lwp)),
912 errno ? strerror (errno) : "OK");
913}
914
915/* Callback for `find_inferior'. Kills an lwp of a given process,
916 except the leader. */
917
918static int
919kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
920{
921 struct thread_info *thread = (struct thread_info *) entry;
922 struct lwp_info *lwp = get_thread_lwp (thread);
923 int wstat;
924 int pid = * (int *) args;
925
926 if (ptid_get_pid (entry->id) != pid)
927 return 0;
928
929 /* We avoid killing the first thread here, because of a Linux kernel (at
930 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
931 the children get a chance to be reaped, it will remain a zombie
932 forever. */
933
934 if (lwpid_of (lwp) == pid)
935 {
936 if (debug_threads)
937 fprintf (stderr, "lkop: is last of process %s\n",
938 target_pid_to_str (entry->id));
939 return 0;
940 }
941
942 do
943 {
944 linux_kill_one_lwp (lwp);
945
946 /* Make sure it died. The loop is most likely unnecessary. */
947 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
948 } while (pid > 0 && WIFSTOPPED (wstat));
949
950 return 0;
951}
952
953static int
954linux_kill (int pid)
955{
956 struct process_info *process;
957 struct lwp_info *lwp;
958 int wstat;
959 int lwpid;
960
961 process = find_process_pid (pid);
962 if (process == NULL)
963 return -1;
964
965 /* If we're killing a running inferior, make sure it is stopped
966 first, as PTRACE_KILL will not work otherwise. */
967 stop_all_lwps (0, NULL);
968
969 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
970
971 /* See the comment in linux_kill_one_lwp. We did not kill the first
972 thread in the list, so do so now. */
973 lwp = find_lwp_pid (pid_to_ptid (pid));
974
975 if (lwp == NULL)
976 {
977 if (debug_threads)
978 fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
979 pid);
980 }
981 else
982 {
983 if (debug_threads)
984 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
985 lwpid_of (lwp), pid);
986
987 do
988 {
989 linux_kill_one_lwp (lwp);
990
991 /* Make sure it died. The loop is most likely unnecessary. */
992 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
993 } while (lwpid > 0 && WIFSTOPPED (wstat));
994 }
995
996 the_target->mourn (process);
997
998 /* Since we presently can only stop all lwps of all processes, we
999 need to unstop lwps of other processes. */
1000 unstop_all_lwps (0, NULL);
1001 return 0;
1002}
1003
1004/* Get pending signal of THREAD, for detaching purposes. This is the
1005 signal the thread last stopped for, which we need to deliver to the
1006 thread when detaching; otherwise, it'd be suppressed/lost. */
1007
1008static int
1009get_detach_signal (struct thread_info *thread)
1010{
1011 enum gdb_signal signo = GDB_SIGNAL_0;
1012 int status;
1013 struct lwp_info *lp = get_thread_lwp (thread);
1014
1015 if (lp->status_pending_p)
1016 status = lp->status_pending;
1017 else
1018 {
1019 /* If the thread had been suspended by gdbserver, and it stopped
1020 cleanly, then it'll have stopped with SIGSTOP. But we don't
1021 want to deliver that SIGSTOP. */
1022 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1023 || thread->last_status.value.sig == GDB_SIGNAL_0)
1024 return 0;
1025
1026 /* Otherwise, we may need to deliver the signal we
1027 intercepted. */
1028 status = lp->last_status;
1029 }
1030
1031 if (!WIFSTOPPED (status))
1032 {
1033 if (debug_threads)
1034 fprintf (stderr,
1035 "GPS: lwp %s hasn't stopped: no pending signal\n",
1036 target_pid_to_str (ptid_of (lp)));
1037 return 0;
1038 }
1039
1040 /* Extended wait statuses aren't real SIGTRAPs. */
1041 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1042 {
1043 if (debug_threads)
1044 fprintf (stderr,
1045 "GPS: lwp %s had stopped with extended "
1046 "status: no pending signal\n",
1047 target_pid_to_str (ptid_of (lp)));
1048 return 0;
1049 }
1050
1051 signo = gdb_signal_from_host (WSTOPSIG (status));
1052
1053 if (program_signals_p && !program_signals[signo])
1054 {
1055 if (debug_threads)
1056 fprintf (stderr,
1057 "GPS: lwp %s had signal %s, but it is in nopass state\n",
1058 target_pid_to_str (ptid_of (lp)),
1059 gdb_signal_to_string (signo));
1060 return 0;
1061 }
1062 else if (!program_signals_p
1063 /* If we have no way to know which signals GDB does not
1064 want to have passed to the program, assume
1065 SIGTRAP/SIGINT, which is GDB's default. */
1066 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1067 {
1068 if (debug_threads)
1069 fprintf (stderr,
1070 "GPS: lwp %s had signal %s, "
1071 "but we don't know if we should pass it. Default to not.\n",
1072 target_pid_to_str (ptid_of (lp)),
1073 gdb_signal_to_string (signo));
1074 return 0;
1075 }
1076 else
1077 {
1078 if (debug_threads)
1079 fprintf (stderr,
1080 "GPS: lwp %s has pending signal %s: delivering it.\n",
1081 target_pid_to_str (ptid_of (lp)),
1082 gdb_signal_to_string (signo));
1083
1084 return WSTOPSIG (status);
1085 }
1086}
1087
1088static int
1089linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1090{
1091 struct thread_info *thread = (struct thread_info *) entry;
1092 struct lwp_info *lwp = get_thread_lwp (thread);
1093 int pid = * (int *) args;
1094 int sig;
1095
1096 if (ptid_get_pid (entry->id) != pid)
1097 return 0;
1098
1099 /* If there is a pending SIGSTOP, get rid of it. */
1100 if (lwp->stop_expected)
1101 {
1102 if (debug_threads)
1103 fprintf (stderr,
1104 "Sending SIGCONT to %s\n",
1105 target_pid_to_str (ptid_of (lwp)));
1106
1107 kill_lwp (lwpid_of (lwp), SIGCONT);
1108 lwp->stop_expected = 0;
1109 }
1110
1111 /* Flush any pending changes to the process's registers. */
1112 regcache_invalidate_thread (get_lwp_thread (lwp));
1113
1114 /* Pass on any pending signal for this thread. */
1115 sig = get_detach_signal (thread);
1116
1117 /* Finally, let it resume. */
1118 if (the_low_target.prepare_to_resume != NULL)
1119 the_low_target.prepare_to_resume (lwp);
1120 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1121 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1122 error (_("Can't detach %s: %s"),
1123 target_pid_to_str (ptid_of (lwp)),
1124 strerror (errno));
1125
1126 delete_lwp (lwp);
1127 return 0;
1128}
1129
1130static int
1131linux_detach (int pid)
1132{
1133 struct process_info *process;
1134
1135 process = find_process_pid (pid);
1136 if (process == NULL)
1137 return -1;
1138
1139 /* Stop all threads before detaching. First, ptrace requires that
1140 the thread is stopped to successfully detach. Second, thread_db
1141 may need to uninstall thread event breakpoints from memory, which
1142 only works with a stopped process anyway. */
1143 stop_all_lwps (0, NULL);
1144
1145#ifdef USE_THREAD_DB
1146 thread_db_detach (process);
1147#endif
1148
1149 /* Stabilize threads (move out of jump pads). */
1150 stabilize_threads ();
1151
1152 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1153
1154 the_target->mourn (process);
1155
1156 /* Since we presently can only stop all lwps of all processes, we
1157 need to unstop lwps of other processes. */
1158 unstop_all_lwps (0, NULL);
1159 return 0;
1160}
1161
1162/* Remove all LWPs that belong to process PROC from the lwp list. */
1163
1164static int
1165delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1166{
1167 struct lwp_info *lwp = (struct lwp_info *) entry;
1168 struct process_info *process = proc;
1169
1170 if (pid_of (lwp) == pid_of (process))
1171 delete_lwp (lwp);
1172
1173 return 0;
1174}
1175
1176static void
1177linux_mourn (struct process_info *process)
1178{
1179 struct process_info_private *priv;
1180
1181#ifdef USE_THREAD_DB
1182 thread_db_mourn (process);
1183#endif
1184
1185 find_inferior (&all_lwps, delete_lwp_callback, process);
1186
1187 /* Free all private data. */
1188 priv = process->private;
1189 free (priv->arch_private);
1190 free (priv);
1191 process->private = NULL;
1192
1193 remove_process (process);
1194}
1195
1196static void
1197linux_join (int pid)
1198{
1199 int status, ret;
1200
1201 do {
1202 ret = my_waitpid (pid, &status, 0);
1203 if (WIFEXITED (status) || WIFSIGNALED (status))
1204 break;
1205 } while (ret != -1 || errno != ECHILD);
1206}
1207
1208/* Return nonzero if the given thread is still alive. */
1209static int
1210linux_thread_alive (ptid_t ptid)
1211{
1212 struct lwp_info *lwp = find_lwp_pid (ptid);
1213
1214 /* We assume we always know if a thread exits. If a whole process
1215 exited but we still haven't been able to report it to GDB, we'll
1216 hold on to the last lwp of the dead process. */
1217 if (lwp != NULL)
1218 return !lwp->dead;
1219 else
1220 return 0;
1221}
1222
1223/* Return 1 if this lwp has an interesting status pending. */
1224static int
1225status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1226{
1227 struct lwp_info *lwp = (struct lwp_info *) entry;
1228 ptid_t ptid = * (ptid_t *) arg;
1229 struct thread_info *thread;
1230
1231 /* Check if we're only interested in events from a specific process
1232 or its lwps. */
1233 if (!ptid_equal (minus_one_ptid, ptid)
1234 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1235 return 0;
1236
1237 thread = get_lwp_thread (lwp);
1238
1239 /* If we got a `vCont;t' but haven't reported the corresponding
1240 stop to GDB yet, do report any pending status the LWP may have. */
1241 if (thread->last_resume_kind == resume_stop
1242 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1243 return 0;
1244
1245 return lwp->status_pending_p;
1246}
1247
1248static int
1249same_lwp (struct inferior_list_entry *entry, void *data)
1250{
1251 ptid_t ptid = *(ptid_t *) data;
1252 int lwp;
1253
1254 if (ptid_get_lwp (ptid) != 0)
1255 lwp = ptid_get_lwp (ptid);
1256 else
1257 lwp = ptid_get_pid (ptid);
1258
1259 if (ptid_get_lwp (entry->id) == lwp)
1260 return 1;
1261
1262 return 0;
1263}
1264
1265struct lwp_info *
1266find_lwp_pid (ptid_t ptid)
1267{
1268 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1269}
1270
1271static struct lwp_info *
1272linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1273{
1274 int ret;
1275 int to_wait_for = -1;
1276 struct lwp_info *child = NULL;
1277
1278 if (debug_threads)
1279 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1280
1281 if (ptid_equal (ptid, minus_one_ptid))
1282 to_wait_for = -1; /* any child */
1283 else
1284 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1285
1286 options |= __WALL;
1287
1288retry:
1289
1290 ret = my_waitpid (to_wait_for, wstatp, options);
1291 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1292 return NULL;
1293 else if (ret == -1)
1294 perror_with_name ("waitpid");
1295
1296 if (debug_threads
1297 && (!WIFSTOPPED (*wstatp)
1298 || (WSTOPSIG (*wstatp) != 32
1299 && WSTOPSIG (*wstatp) != 33)))
1300 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1301
1302 child = find_lwp_pid (pid_to_ptid (ret));
1303
1304 /* If we didn't find a process, one of two things presumably happened:
1305 - A process we started and then detached from has exited. Ignore it.
1306 - A process we are controlling has forked and the new child's stop
1307 was reported to us by the kernel. Save its PID. */
1308 if (child == NULL && WIFSTOPPED (*wstatp))
1309 {
1310 add_to_pid_list (&stopped_pids, ret, *wstatp);
1311 goto retry;
1312 }
1313 else if (child == NULL)
1314 goto retry;
1315
1316 child->stopped = 1;
1317
1318 child->last_status = *wstatp;
1319
1320 if (WIFSTOPPED (*wstatp))
1321 {
1322 struct process_info *proc;
1323
1324 /* Architecture-specific setup after inferior is running. This
1325 needs to happen after we have attached to the inferior and it
1326 is stopped for the first time, but before we access any
1327 inferior registers. */
1328 proc = find_process_pid (pid_of (child));
1329 if (proc->private->new_inferior)
1330 {
1331 struct thread_info *saved_inferior;
1332
1333 saved_inferior = current_inferior;
1334 current_inferior = get_lwp_thread (child);
1335
1336 the_low_target.arch_setup ();
1337
1338 current_inferior = saved_inferior;
1339
1340 proc->private->new_inferior = 0;
1341 }
1342 }
1343
1344 /* Fetch the possibly triggered data watchpoint info and store it in
1345 CHILD.
1346
1347 On some archs, like x86, that use debug registers to set
1348 watchpoints, it's possible that the way to know which watched
1349 address trapped, is to check the register that is used to select
1350 which address to watch. Problem is, between setting the
1351 watchpoint and reading back which data address trapped, the user
1352 may change the set of watchpoints, and, as a consequence, GDB
1353 changes the debug registers in the inferior. To avoid reading
1354 back a stale stopped-data-address when that happens, we cache in
1355 LP the fact that a watchpoint trapped, and the corresponding data
1356 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1357 changes the debug registers meanwhile, we have the cached data we
1358 can rely on. */
1359
1360 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1361 {
1362 if (the_low_target.stopped_by_watchpoint == NULL)
1363 {
1364 child->stopped_by_watchpoint = 0;
1365 }
1366 else
1367 {
1368 struct thread_info *saved_inferior;
1369
1370 saved_inferior = current_inferior;
1371 current_inferior = get_lwp_thread (child);
1372
1373 child->stopped_by_watchpoint
1374 = the_low_target.stopped_by_watchpoint ();
1375
1376 if (child->stopped_by_watchpoint)
1377 {
1378 if (the_low_target.stopped_data_address != NULL)
1379 child->stopped_data_address
1380 = the_low_target.stopped_data_address ();
1381 else
1382 child->stopped_data_address = 0;
1383 }
1384
1385 current_inferior = saved_inferior;
1386 }
1387 }
1388
1389 /* Store the STOP_PC, with adjustment applied. This depends on the
1390 architecture being defined already (so that CHILD has a valid
1391 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1392 not). */
1393 if (WIFSTOPPED (*wstatp))
1394 child->stop_pc = get_stop_pc (child);
1395
1396 if (debug_threads
1397 && WIFSTOPPED (*wstatp)
1398 && the_low_target.get_pc != NULL)
1399 {
1400 struct thread_info *saved_inferior = current_inferior;
1401 struct regcache *regcache;
1402 CORE_ADDR pc;
1403
1404 current_inferior = get_lwp_thread (child);
1405 regcache = get_thread_regcache (current_inferior, 1);
1406 pc = (*the_low_target.get_pc) (regcache);
1407 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1408 current_inferior = saved_inferior;
1409 }
1410
1411 return child;
1412}
1413
1414/* This function should only be called if the LWP got a SIGTRAP.
1415
1416 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1417 event was handled, 0 otherwise. */
1418
1419static int
1420handle_tracepoints (struct lwp_info *lwp)
1421{
1422 struct thread_info *tinfo = get_lwp_thread (lwp);
1423 int tpoint_related_event = 0;
1424
1425 /* If this tracepoint hit causes a tracing stop, we'll immediately
1426 uninsert tracepoints. To do this, we temporarily pause all
1427 threads, unpatch away, and then unpause threads. We need to make
1428 sure the unpausing doesn't resume LWP too. */
1429 lwp->suspended++;
1430
1431 /* And we need to be sure that any all-threads-stopping doesn't try
1432 to move threads out of the jump pads, as it could deadlock the
1433 inferior (LWP could be in the jump pad, maybe even holding the
1434 lock.) */
1435
1436 /* Do any necessary step collect actions. */
1437 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1438
1439 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1440
1441 /* See if we just hit a tracepoint and do its main collect
1442 actions. */
1443 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1444
1445 lwp->suspended--;
1446
1447 gdb_assert (lwp->suspended == 0);
1448 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1449
1450 if (tpoint_related_event)
1451 {
1452 if (debug_threads)
1453 fprintf (stderr, "got a tracepoint event\n");
1454 return 1;
1455 }
1456
1457 return 0;
1458}
1459
1460/* Convenience wrapper. Returns true if LWP is presently collecting a
1461 fast tracepoint. */
1462
1463static int
1464linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1465 struct fast_tpoint_collect_status *status)
1466{
1467 CORE_ADDR thread_area;
1468
1469 if (the_low_target.get_thread_area == NULL)
1470 return 0;
1471
1472 /* Get the thread area address. This is used to recognize which
1473 thread is which when tracing with the in-process agent library.
1474 We don't read anything from the address, and treat it as opaque;
1475 it's the address itself that we assume is unique per-thread. */
1476 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1477 return 0;
1478
1479 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1480}
1481
1482/* The reason we resume in the caller is that we want to be able
1483 to pass lwp->status_pending as WSTAT, and we need to clear
1484 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1485 refuses to resume. */
1486
1487static int
1488maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1489{
1490 struct thread_info *saved_inferior;
1491
1492 saved_inferior = current_inferior;
1493 current_inferior = get_lwp_thread (lwp);
1494
1495 if ((wstat == NULL
1496 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1497 && supports_fast_tracepoints ()
1498 && agent_loaded_p ())
1499 {
1500 struct fast_tpoint_collect_status status;
1501 int r;
1502
1503 if (debug_threads)
1504 fprintf (stderr, "\
1505Checking whether LWP %ld needs to move out of the jump pad.\n",
1506 lwpid_of (lwp));
1507
1508 r = linux_fast_tracepoint_collecting (lwp, &status);
1509
1510 if (wstat == NULL
1511 || (WSTOPSIG (*wstat) != SIGILL
1512 && WSTOPSIG (*wstat) != SIGFPE
1513 && WSTOPSIG (*wstat) != SIGSEGV
1514 && WSTOPSIG (*wstat) != SIGBUS))
1515 {
1516 lwp->collecting_fast_tracepoint = r;
1517
1518 if (r != 0)
1519 {
1520 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1521 {
1522 /* Haven't executed the original instruction yet.
1523 Set breakpoint there, and wait till it's hit,
1524 then single-step until exiting the jump pad. */
1525 lwp->exit_jump_pad_bkpt
1526 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1527 }
1528
1529 if (debug_threads)
1530 fprintf (stderr, "\
1531Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1532 lwpid_of (lwp));
1533 current_inferior = saved_inferior;
1534
1535 return 1;
1536 }
1537 }
1538 else
1539 {
1540 /* If we get a synchronous signal while collecting, *and*
1541 while executing the (relocated) original instruction,
1542 reset the PC to point at the tpoint address, before
1543 reporting to GDB. Otherwise, it's an IPA lib bug: just
1544 report the signal to GDB, and pray for the best. */
1545
1546 lwp->collecting_fast_tracepoint = 0;
1547
1548 if (r != 0
1549 && (status.adjusted_insn_addr <= lwp->stop_pc
1550 && lwp->stop_pc < status.adjusted_insn_addr_end))
1551 {
1552 siginfo_t info;
1553 struct regcache *regcache;
1554
1555 /* The si_addr on a few signals references the address
1556 of the faulting instruction. Adjust that as
1557 well. */
1558 if ((WSTOPSIG (*wstat) == SIGILL
1559 || WSTOPSIG (*wstat) == SIGFPE
1560 || WSTOPSIG (*wstat) == SIGBUS
1561 || WSTOPSIG (*wstat) == SIGSEGV)
1562 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
1563 (PTRACE_TYPE_ARG3) 0, &info) == 0
1564 /* Final check just to make sure we don't clobber
1565 the siginfo of non-kernel-sent signals. */
1566 && (uintptr_t) info.si_addr == lwp->stop_pc)
1567 {
1568 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1569 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
1570 (PTRACE_TYPE_ARG3) 0, &info);
1571 }
1572
1573 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1574 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1575 lwp->stop_pc = status.tpoint_addr;
1576
1577 /* Cancel any fast tracepoint lock this thread was
1578 holding. */
1579 force_unlock_trace_buffer ();
1580 }
1581
1582 if (lwp->exit_jump_pad_bkpt != NULL)
1583 {
1584 if (debug_threads)
1585 fprintf (stderr,
1586 "Cancelling fast exit-jump-pad: removing bkpt. "
1587 "stopping all threads momentarily.\n");
1588
1589 stop_all_lwps (1, lwp);
1590 cancel_breakpoints ();
1591
1592 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1593 lwp->exit_jump_pad_bkpt = NULL;
1594
1595 unstop_all_lwps (1, lwp);
1596
1597 gdb_assert (lwp->suspended >= 0);
1598 }
1599 }
1600 }
1601
1602 if (debug_threads)
1603 fprintf (stderr, "\
1604Checking whether LWP %ld needs to move out of the jump pad...no\n",
1605 lwpid_of (lwp));
1606
1607 current_inferior = saved_inferior;
1608 return 0;
1609}
1610
1611/* Enqueue one signal in the "signals to report later when out of the
1612 jump pad" list. */
1613
1614static void
1615enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1616{
1617 struct pending_signals *p_sig;
1618
1619 if (debug_threads)
1620 fprintf (stderr, "\
1621Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1622
1623 if (debug_threads)
1624 {
1625 struct pending_signals *sig;
1626
1627 for (sig = lwp->pending_signals_to_report;
1628 sig != NULL;
1629 sig = sig->prev)
1630 fprintf (stderr,
1631 " Already queued %d\n",
1632 sig->signal);
1633
1634 fprintf (stderr, " (no more currently queued signals)\n");
1635 }
1636
1637 /* Don't enqueue non-RT signals if they are already in the deferred
1638 queue. (SIGSTOP being the easiest signal to see ending up here
1639 twice) */
1640 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1641 {
1642 struct pending_signals *sig;
1643
1644 for (sig = lwp->pending_signals_to_report;
1645 sig != NULL;
1646 sig = sig->prev)
1647 {
1648 if (sig->signal == WSTOPSIG (*wstat))
1649 {
1650 if (debug_threads)
1651 fprintf (stderr,
1652 "Not requeuing already queued non-RT signal %d"
1653 " for LWP %ld\n",
1654 sig->signal,
1655 lwpid_of (lwp));
1656 return;
1657 }
1658 }
1659 }
1660
1661 p_sig = xmalloc (sizeof (*p_sig));
1662 p_sig->prev = lwp->pending_signals_to_report;
1663 p_sig->signal = WSTOPSIG (*wstat);
1664 memset (&p_sig->info, 0, sizeof (siginfo_t));
1665 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1666 &p_sig->info);
1667
1668 lwp->pending_signals_to_report = p_sig;
1669}
1670
1671/* Dequeue one signal from the "signals to report later when out of
1672 the jump pad" list. */
1673
1674static int
1675dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1676{
1677 if (lwp->pending_signals_to_report != NULL)
1678 {
1679 struct pending_signals **p_sig;
1680
1681 p_sig = &lwp->pending_signals_to_report;
1682 while ((*p_sig)->prev != NULL)
1683 p_sig = &(*p_sig)->prev;
1684
1685 *wstat = W_STOPCODE ((*p_sig)->signal);
1686 if ((*p_sig)->info.si_signo != 0)
1687 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1688 &(*p_sig)->info);
1689 free (*p_sig);
1690 *p_sig = NULL;
1691
1692 if (debug_threads)
1693 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1694 WSTOPSIG (*wstat), lwpid_of (lwp));
1695
1696 if (debug_threads)
1697 {
1698 struct pending_signals *sig;
1699
1700 for (sig = lwp->pending_signals_to_report;
1701 sig != NULL;
1702 sig = sig->prev)
1703 fprintf (stderr,
1704 " Still queued %d\n",
1705 sig->signal);
1706
1707 fprintf (stderr, " (no more queued signals)\n");
1708 }
1709
1710 return 1;
1711 }
1712
1713 return 0;
1714}
1715
1716/* Arrange for a breakpoint to be hit again later. We don't keep the
1717 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1718 will handle the current event, eventually we will resume this LWP,
1719 and this breakpoint will trap again. */
1720
1721static int
1722cancel_breakpoint (struct lwp_info *lwp)
1723{
1724 struct thread_info *saved_inferior;
1725
1726 /* There's nothing to do if we don't support breakpoints. */
1727 if (!supports_breakpoints ())
1728 return 0;
1729
1730 /* breakpoint_at reads from current inferior. */
1731 saved_inferior = current_inferior;
1732 current_inferior = get_lwp_thread (lwp);
1733
1734 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1735 {
1736 if (debug_threads)
1737 fprintf (stderr,
1738 "CB: Push back breakpoint for %s\n",
1739 target_pid_to_str (ptid_of (lwp)));
1740
1741 /* Back up the PC if necessary. */
1742 if (the_low_target.decr_pc_after_break)
1743 {
1744 struct regcache *regcache
1745 = get_thread_regcache (current_inferior, 1);
1746 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1747 }
1748
1749 current_inferior = saved_inferior;
1750 return 1;
1751 }
1752 else
1753 {
1754 if (debug_threads)
1755 fprintf (stderr,
1756 "CB: No breakpoint found at %s for [%s]\n",
1757 paddress (lwp->stop_pc),
1758 target_pid_to_str (ptid_of (lwp)));
1759 }
1760
1761 current_inferior = saved_inferior;
1762 return 0;
1763}
1764
1765/* When the event-loop is doing a step-over, this points at the thread
1766 being stepped. */
1767ptid_t step_over_bkpt;
1768
1769/* Wait for an event from child PID. If PID is -1, wait for any
1770 child. Store the stop status through the status pointer WSTAT.
1771 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1772 event was found and OPTIONS contains WNOHANG. Return the PID of
1773 the stopped child otherwise. */
1774
1775static int
1776linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1777{
1778 struct lwp_info *event_child, *requested_child;
1779 ptid_t wait_ptid;
1780
1781 event_child = NULL;
1782 requested_child = NULL;
1783
1784 /* Check for a lwp with a pending status. */
1785
1786 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1787 {
1788 event_child = (struct lwp_info *)
1789 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1790 if (debug_threads && event_child)
1791 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1792 }
1793 else
1794 {
1795 requested_child = find_lwp_pid (ptid);
1796
1797 if (stopping_threads == NOT_STOPPING_THREADS
1798 && requested_child->status_pending_p
1799 && requested_child->collecting_fast_tracepoint)
1800 {
1801 enqueue_one_deferred_signal (requested_child,
1802 &requested_child->status_pending);
1803 requested_child->status_pending_p = 0;
1804 requested_child->status_pending = 0;
1805 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1806 }
1807
1808 if (requested_child->suspended
1809 && requested_child->status_pending_p)
1810 fatal ("requesting an event out of a suspended child?");
1811
1812 if (requested_child->status_pending_p)
1813 event_child = requested_child;
1814 }
1815
1816 if (event_child != NULL)
1817 {
1818 if (debug_threads)
1819 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1820 lwpid_of (event_child), event_child->status_pending);
1821 *wstat = event_child->status_pending;
1822 event_child->status_pending_p = 0;
1823 event_child->status_pending = 0;
1824 current_inferior = get_lwp_thread (event_child);
1825 return lwpid_of (event_child);
1826 }
1827
1828 if (ptid_is_pid (ptid))
1829 {
1830 /* A request to wait for a specific tgid. This is not possible
1831 with waitpid, so instead, we wait for any child, and leave
1832 children we're not interested in right now with a pending
1833 status to report later. */
1834 wait_ptid = minus_one_ptid;
1835 }
1836 else
1837 wait_ptid = ptid;
1838
1839 /* We only enter this loop if no process has a pending wait status. Thus
1840 any action taken in response to a wait status inside this loop is
1841 responding as soon as we detect the status, not after any pending
1842 events. */
1843 while (1)
1844 {
1845 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1846
1847 if ((options & WNOHANG) && event_child == NULL)
1848 {
1849 if (debug_threads)
1850 fprintf (stderr, "WNOHANG set, no event found\n");
1851 return 0;
1852 }
1853
1854 if (event_child == NULL)
1855 error ("event from unknown child");
1856
1857 if (ptid_is_pid (ptid)
1858 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1859 {
1860 if (! WIFSTOPPED (*wstat))
1861 mark_lwp_dead (event_child, *wstat);
1862 else
1863 {
1864 event_child->status_pending_p = 1;
1865 event_child->status_pending = *wstat;
1866 }
1867 continue;
1868 }
1869
1870 current_inferior = get_lwp_thread (event_child);
1871
1872 /* Check for thread exit. */
1873 if (! WIFSTOPPED (*wstat))
1874 {
1875 if (debug_threads)
1876 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1877
1878 /* If the last thread is exiting, just return. */
1879 if (last_thread_of_process_p (current_inferior))
1880 {
1881 if (debug_threads)
1882 fprintf (stderr, "LWP %ld is last lwp of process\n",
1883 lwpid_of (event_child));
1884 return lwpid_of (event_child);
1885 }
1886
1887 if (!non_stop)
1888 {
1889 current_inferior = (struct thread_info *) all_threads.head;
1890 if (debug_threads)
1891 fprintf (stderr, "Current inferior is now %ld\n",
1892 lwpid_of (get_thread_lwp (current_inferior)));
1893 }
1894 else
1895 {
1896 current_inferior = NULL;
1897 if (debug_threads)
1898 fprintf (stderr, "Current inferior is now <NULL>\n");
1899 }
1900
1901 /* If we were waiting for this particular child to do something...
1902 well, it did something. */
1903 if (requested_child != NULL)
1904 {
1905 int lwpid = lwpid_of (event_child);
1906
1907 /* Cancel the step-over operation --- the thread that
1908 started it is gone. */
1909 if (finish_step_over (event_child))
1910 unstop_all_lwps (1, event_child);
1911 delete_lwp (event_child);
1912 return lwpid;
1913 }
1914
1915 delete_lwp (event_child);
1916
1917 /* Wait for a more interesting event. */
1918 continue;
1919 }
1920
1921 if (event_child->must_set_ptrace_flags)
1922 {
1923 linux_enable_event_reporting (lwpid_of (event_child));
1924 event_child->must_set_ptrace_flags = 0;
1925 }
1926
1927 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1928 && *wstat >> 16 != 0)
1929 {
1930 handle_extended_wait (event_child, *wstat);
1931 continue;
1932 }
1933
1934 if (WIFSTOPPED (*wstat)
1935 && WSTOPSIG (*wstat) == SIGSTOP
1936 && event_child->stop_expected)
1937 {
1938 int should_stop;
1939
1940 if (debug_threads)
1941 fprintf (stderr, "Expected stop.\n");
1942 event_child->stop_expected = 0;
1943
1944 should_stop = (current_inferior->last_resume_kind == resume_stop
1945 || stopping_threads != NOT_STOPPING_THREADS);
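          /* Keep the LWP stopped only if GDB asked for the stop or we
             are in the middle of stopping all threads; otherwise the
             SIGSTOP has served its purpose and the LWP can go on.  */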
1946
1947 if (!should_stop)
1948 {
1949 linux_resume_one_lwp (event_child,
1950 event_child->stepping, 0, NULL);
1951 continue;
1952 }
1953 }
1954
1955 return lwpid_of (event_child);
1956 }
1957
1958 /* NOTREACHED */
1959 return 0;
1960}
1961
1962/* Count the LWPs that have had events.  */
1963
1964static int
1965count_events_callback (struct inferior_list_entry *entry, void *data)
1966{
1967 struct lwp_info *lp = (struct lwp_info *) entry;
1968 struct thread_info *thread = get_lwp_thread (lp);
1969 int *count = data;
1970
1971 gdb_assert (count != NULL);
1972
1973 /* Count only resumed LWPs that have a SIGTRAP event pending that
1974 should be reported to GDB. */
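  /* A pending SIGTRAP at an address where gdbserver itself keeps a
     breakpoint inserted is an internal event, hence the
     breakpoint_inserted_here exclusion below.  */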
1975 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1976 && thread->last_resume_kind != resume_stop
1977 && lp->status_pending_p
1978 && WIFSTOPPED (lp->status_pending)
1979 && WSTOPSIG (lp->status_pending) == SIGTRAP
1980 && !breakpoint_inserted_here (lp->stop_pc))
1981 (*count)++;
1982
1983 return 0;
1984}
1985
1986/* Select the LWP (if any) that is currently being single-stepped. */
1987
1988static int
1989select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1990{
1991 struct lwp_info *lp = (struct lwp_info *) entry;
1992 struct thread_info *thread = get_lwp_thread (lp);
1993
1994 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1995 && thread->last_resume_kind == resume_step
1996 && lp->status_pending_p)
1997 return 1;
1998 else
1999 return 0;
2000}
2001
2002/* Select the Nth LWP that has had a SIGTRAP event that should be
2003 reported to GDB. */
2004
2005static int
2006select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2007{
2008 struct lwp_info *lp = (struct lwp_info *) entry;
2009 struct thread_info *thread = get_lwp_thread (lp);
2010 int *selector = data;
2011
2012 gdb_assert (selector != NULL);
2013
2014 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2015 if (thread->last_resume_kind != resume_stop
2016 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2017 && lp->status_pending_p
2018 && WIFSTOPPED (lp->status_pending)
2019 && WSTOPSIG (lp->status_pending) == SIGTRAP
2020 && !breakpoint_inserted_here (lp->stop_pc))
2021 if ((*selector)-- == 0)
2022 return 1;
2023
2024 return 0;
2025}
2026
2027static int
2028cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2029{
2030 struct lwp_info *lp = (struct lwp_info *) entry;
2031 struct thread_info *thread = get_lwp_thread (lp);
2032 struct lwp_info *event_lp = data;
2033
2034 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2035 if (lp == event_lp)
2036 return 0;
2037
2038 /* If a LWP other than the LWP that we're reporting an event for has
2039 hit a GDB breakpoint (as opposed to some random trap signal),
2040 then just arrange for it to hit it again later. We don't keep
2041 the SIGTRAP status and don't forward the SIGTRAP signal to the
2042 LWP. We will handle the current event, eventually we will resume
2043 all LWPs, and this one will get its breakpoint trap again.
2044
2045 If we do not do this, then we run the risk that the user will
2046 delete or disable the breakpoint, but the LWP will have already
2047 tripped on it. */
2048
2049 if (thread->last_resume_kind != resume_stop
2050 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2051 && lp->status_pending_p
2052 && WIFSTOPPED (lp->status_pending)
2053 && WSTOPSIG (lp->status_pending) == SIGTRAP
2054 && !lp->stepping
2055 && !lp->stopped_by_watchpoint
2056 && cancel_breakpoint (lp))
2057 /* Throw away the SIGTRAP. */
2058 lp->status_pending_p = 0;
2059
2060 return 0;
2061}
2062
2063static void
2064linux_cancel_breakpoints (void)
2065{
2066 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2067}
2068
2069/* Select one LWP out of those that have events pending. */
2070
2071static void
2072select_event_lwp (struct lwp_info **orig_lp)
2073{
2074 int num_events = 0;
2075 int random_selector;
2076 struct lwp_info *event_lp;
2077
2078 /* Give preference to any LWP that is being single-stepped. */
2079 event_lp
2080 = (struct lwp_info *) find_inferior (&all_lwps,
2081 select_singlestep_lwp_callback, NULL);
2082 if (event_lp != NULL)
2083 {
2084 if (debug_threads)
2085 fprintf (stderr,
2086 "SEL: Select single-step %s\n",
2087 target_pid_to_str (ptid_of (event_lp)));
2088 }
2089 else
2090 {
2091 /* No single-stepping LWP. Select one at random, out of those
2092 which have had SIGTRAP events. */
2093
2094 /* First see how many SIGTRAP events we have. */
2095 find_inferior (&all_lwps, count_events_callback, &num_events);
2096
2097 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2098 random_selector = (int)
2099 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
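      /* rand () / (RAND_MAX + 1.0) is uniform in [0, 1), so this
         yields an index evenly distributed in [0, num_events).  */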
2100
2101 if (debug_threads && num_events > 1)
2102 fprintf (stderr,
2103 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2104 num_events, random_selector);
2105
2106 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2107 select_event_lwp_callback,
2108 &random_selector);
2109 }
2110
2111 if (event_lp != NULL)
2112 {
2113 /* Switch the event LWP. */
2114 *orig_lp = event_lp;
2115 }
2116}
2117
2118/* Decrement the suspend count of an LWP. */
2119
2120static int
2121unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2122{
2123 struct lwp_info *lwp = (struct lwp_info *) entry;
2124
2125 /* Ignore EXCEPT. */
2126 if (lwp == except)
2127 return 0;
2128
2129 lwp->suspended--;
2130
2131 gdb_assert (lwp->suspended >= 0);
2132 return 0;
2133}
2134
2135/* Decrement the suspend count of all LWPs, except EXCEPT, if
2136   non-NULL.  */
2137
2138static void
2139unsuspend_all_lwps (struct lwp_info *except)
2140{
2141 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2142}
2143
2144static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2145static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2146 void *data);
2147static int lwp_running (struct inferior_list_entry *entry, void *data);
2148static ptid_t linux_wait_1 (ptid_t ptid,
2149 struct target_waitstatus *ourstatus,
2150 int target_options);
2151
2152/* Stabilize threads (move them out of jump pads).
2153
2154   If a thread is midway through collecting a fast tracepoint, we
2155   need to finish the collection and move it out of the jump pad
2156   before reporting the signal.
2157
2158   This avoids recursion while collecting (when a signal arrives
2159   midway, and the signal handler itself collects), which would trash
2160   the trace buffer.  In case the user set a breakpoint in a signal
2161   handler, this avoids the backtrace showing the jump pad, etc.
2162   Most importantly, there are certain things we can't do safely if
2163   threads are stopped in a jump pad (or in its callees).  For
2164   example:
2165
2166   - starting a new trace run.  A thread still collecting for the
2167   previous run could trash the trace buffer when resumed.  The trace
2168   buffer control structures would have been reset but the thread has
2169   no way to tell.  The thread could even be midway through memcpy'ing
2170   into the buffer, which would mean that when resumed, it would
2171   clobber the trace buffer that had been set up for a new run.
2172
2173   - we can't rewrite/reuse the jump pads for new tracepoints
2174   safely.  Say you do tstart while a thread is stopped midway
2175   through collecting.  When the thread is later resumed, it finishes
2176   the collection, and returns to the jump pad, to execute the
2177   original instruction that was under the tracepoint jump at the
2178   time the older run had been started.  If the jump pad has since
2179   been rewritten for something else in the new run, the thread would
2180   now execute the wrong, effectively random, instructions.  */
2181
2182static void
2183linux_stabilize_threads (void)
2184{
2185 struct thread_info *save_inferior;
2186 struct lwp_info *lwp_stuck;
2187
2188 lwp_stuck
2189 = (struct lwp_info *) find_inferior (&all_lwps,
2190 stuck_in_jump_pad_callback, NULL);
2191 if (lwp_stuck != NULL)
2192 {
2193 if (debug_threads)
2194 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2195 lwpid_of (lwp_stuck));
2196 return;
2197 }
2198
2199 save_inferior = current_inferior;
2200
2201 stabilizing_threads = 1;
2202
2203 /* Kick 'em all. */
2204 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2205
2206 /* Loop until all are stopped out of the jump pads. */
2207 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2208 {
2209 struct target_waitstatus ourstatus;
2210 struct lwp_info *lwp;
2211 int wstat;
2212
2213      /* Note that we go through the full wait event loop.  While
2214         moving threads out of the jump pad, we need to be able to
2215         step over internal breakpoints and such.  */
2216 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2217
2218 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2219 {
2220 lwp = get_thread_lwp (current_inferior);
2221
2222 /* Lock it. */
2223 lwp->suspended++;
2224
2225 if (ourstatus.value.sig != GDB_SIGNAL_0
2226 || current_inferior->last_resume_kind == resume_stop)
2227 {
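              /* Synthesize a "stopped with this signal" wait status,
                 i.e. (sig << 8) | 0x7f, and queue it for later.  */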
2228 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2229 enqueue_one_deferred_signal (lwp, &wstat);
2230 }
2231 }
2232 }
2233
2234 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2235
2236 stabilizing_threads = 0;
2237
2238 current_inferior = save_inferior;
2239
2240 if (debug_threads)
2241 {
2242 lwp_stuck
2243 = (struct lwp_info *) find_inferior (&all_lwps,
2244 stuck_in_jump_pad_callback, NULL);
2245 if (lwp_stuck != NULL)
2246 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2247 lwpid_of (lwp_stuck));
2248 }
2249}
2250
2251/* Wait for process, returns status. */
2252
2253static ptid_t
2254linux_wait_1 (ptid_t ptid,
2255 struct target_waitstatus *ourstatus, int target_options)
2256{
2257 int w;
2258 struct lwp_info *event_child;
2259 int options;
2260 int pid;
2261 int step_over_finished;
2262 int bp_explains_trap;
2263 int maybe_internal_trap;
2264 int report_to_gdb;
2265 int trace_event;
2266 int in_step_range;
2267
2268 /* Translate generic target options into linux options. */
2269 options = __WALL;
2270 if (target_options & TARGET_WNOHANG)
2271 options |= WNOHANG;
2272
2273retry:
2274 bp_explains_trap = 0;
2275 trace_event = 0;
2276 in_step_range = 0;
2277 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2278
2279 /* If we were only supposed to resume one thread, only wait for
2280 that thread - if it's still alive. If it died, however - which
2281 can happen if we're coming from the thread death case below -
2282 then we need to make sure we restart the other threads. We could
2283 pick a thread at random or restart all; restarting all is less
2284 arbitrary. */
2285 if (!non_stop
2286 && !ptid_equal (cont_thread, null_ptid)
2287 && !ptid_equal (cont_thread, minus_one_ptid))
2288 {
2289 struct thread_info *thread;
2290
2291 thread = (struct thread_info *) find_inferior_id (&all_threads,
2292 cont_thread);
2293
2294 /* No stepping, no signal - unless one is pending already, of course. */
2295 if (thread == NULL)
2296 {
2297 struct thread_resume resume_info;
2298 resume_info.thread = minus_one_ptid;
2299 resume_info.kind = resume_continue;
2300 resume_info.sig = 0;
2301 linux_resume (&resume_info, 1);
2302 }
2303 else
2304 ptid = cont_thread;
2305 }
2306
2307 if (ptid_equal (step_over_bkpt, null_ptid))
2308 pid = linux_wait_for_event (ptid, &w, options);
2309 else
2310 {
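      /* A step-over must finish before anything else can be reported,
         so mask out WNOHANG and block until this LWP stops.  */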
2311 if (debug_threads)
2312 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2313 target_pid_to_str (step_over_bkpt));
2314 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2315 }
2316
2317 if (pid == 0) /* only if TARGET_WNOHANG */
2318 return null_ptid;
2319
2320 event_child = get_thread_lwp (current_inferior);
2321
2322 /* If we are waiting for a particular child, and it exited,
2323 linux_wait_for_event will return its exit status. Similarly if
2324 the last child exited. If this is not the last child, however,
2325 do not report it as exited until there is a 'thread exited' response
2326 available in the remote protocol. Instead, just wait for another event.
2327 This should be safe, because if the thread crashed we will already
2328 have reported the termination signal to GDB; that should stop any
2329 in-progress stepping operations, etc.
2330
2331 Report the exit status of the last thread to exit. This matches
2332 LinuxThreads' behavior. */
2333
2334 if (last_thread_of_process_p (current_inferior))
2335 {
2336 if (WIFEXITED (w) || WIFSIGNALED (w))
2337 {
2338 if (WIFEXITED (w))
2339 {
2340 ourstatus->kind = TARGET_WAITKIND_EXITED;
2341 ourstatus->value.integer = WEXITSTATUS (w);
2342
2343 if (debug_threads)
2344 fprintf (stderr,
2345 "\nChild exited with retcode = %x \n",
2346 WEXITSTATUS (w));
2347 }
2348 else
2349 {
2350 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2351 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2352
2353 if (debug_threads)
2354 fprintf (stderr,
2355 "\nChild terminated with signal = %x \n",
2356 WTERMSIG (w));
2357
2358 }
2359
2360 return ptid_of (event_child);
2361 }
2362 }
2363 else
2364 {
2365 if (!WIFSTOPPED (w))
2366 goto retry;
2367 }
2368
2369 /* If this event was not handled before, and is not a SIGTRAP, we
2370 report it. SIGILL and SIGSEGV are also treated as traps in case
2371 a breakpoint is inserted at the current PC. If this target does
2372 not support internal breakpoints at all, we also report the
2373 SIGTRAP without further processing; it's of no concern to us. */
2374 maybe_internal_trap
2375 = (supports_breakpoints ()
2376 && (WSTOPSIG (w) == SIGTRAP
2377 || ((WSTOPSIG (w) == SIGILL
2378 || WSTOPSIG (w) == SIGSEGV)
2379 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2380
2381 if (maybe_internal_trap)
2382 {
2383 /* Handle anything that requires bookkeeping before deciding to
2384 report the event or continue waiting. */
2385
2386 /* First check if we can explain the SIGTRAP with an internal
2387 breakpoint, or if we should possibly report the event to GDB.
2388 Do this before anything that may remove or insert a
2389 breakpoint. */
2390 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2391
2392 /* We have a SIGTRAP, possibly a step-over dance has just
2393 finished. If so, tweak the state machine accordingly,
2394 reinsert breakpoints and delete any reinsert (software
2395 single-step) breakpoints. */
2396 step_over_finished = finish_step_over (event_child);
2397
2398 /* Now invoke the callbacks of any internal breakpoints there. */
2399 check_breakpoints (event_child->stop_pc);
2400
2401 /* Handle tracepoint data collecting. This may overflow the
2402 trace buffer, and cause a tracing stop, removing
2403 breakpoints. */
2404 trace_event = handle_tracepoints (event_child);
2405
2406 if (bp_explains_trap)
2407 {
2408 /* If we stepped or ran into an internal breakpoint, we've
2409 already handled it. So next time we resume (from this
2410 PC), we should step over it. */
2411 if (debug_threads)
2412 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2413
2414 if (breakpoint_here (event_child->stop_pc))
2415 event_child->need_step_over = 1;
2416 }
2417 }
2418 else
2419 {
2420 /* We have some other signal, possibly a step-over dance was in
2421 progress, and it should be cancelled too. */
2422 step_over_finished = finish_step_over (event_child);
2423 }
2424
2425 /* We have all the data we need. Either report the event to GDB, or
2426 resume threads and keep waiting for more. */
2427
2428 /* If we're collecting a fast tracepoint, finish the collection and
2429 move out of the jump pad before delivering a signal. See
2430 linux_stabilize_threads. */
2431
2432 if (WIFSTOPPED (w)
2433 && WSTOPSIG (w) != SIGTRAP
2434 && supports_fast_tracepoints ()
2435 && agent_loaded_p ())
2436 {
2437 if (debug_threads)
2438 fprintf (stderr,
2439 "Got signal %d for LWP %ld. Check if we need "
2440 "to defer or adjust it.\n",
2441 WSTOPSIG (w), lwpid_of (event_child));
2442
2443 /* Allow debugging the jump pad itself. */
2444 if (current_inferior->last_resume_kind != resume_step
2445 && maybe_move_out_of_jump_pad (event_child, &w))
2446 {
2447 enqueue_one_deferred_signal (event_child, &w);
2448
2449 if (debug_threads)
2450 fprintf (stderr,
2451 "Signal %d for LWP %ld deferred (in jump pad)\n",
2452 WSTOPSIG (w), lwpid_of (event_child));
2453
2454 linux_resume_one_lwp (event_child, 0, 0, NULL);
2455 goto retry;
2456 }
2457 }
2458
2459 if (event_child->collecting_fast_tracepoint)
2460 {
2461 if (debug_threads)
2462 fprintf (stderr, "\
2463LWP %ld was trying to move out of the jump pad (%d). \
2464Check if we're already there.\n",
2465 lwpid_of (event_child),
2466 event_child->collecting_fast_tracepoint);
2467
2468 trace_event = 1;
2469
2470 event_child->collecting_fast_tracepoint
2471 = linux_fast_tracepoint_collecting (event_child, NULL);
2472
2473 if (event_child->collecting_fast_tracepoint != 1)
2474 {
2475 /* No longer need this breakpoint. */
2476 if (event_child->exit_jump_pad_bkpt != NULL)
2477 {
2478 if (debug_threads)
2479 fprintf (stderr,
2480 "No longer need exit-jump-pad bkpt; removing it."
2481 "stopping all threads momentarily.\n");
2482
2483 /* Other running threads could hit this breakpoint.
2484 We don't handle moribund locations like GDB does,
2485 instead we always pause all threads when removing
2486 breakpoints, so that any step-over or
2487 decr_pc_after_break adjustment is always taken
2488 care of while the breakpoint is still
2489 inserted. */
2490 stop_all_lwps (1, event_child);
2491 cancel_breakpoints ();
2492
2493 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2494 event_child->exit_jump_pad_bkpt = NULL;
2495
2496 unstop_all_lwps (1, event_child);
2497
2498 gdb_assert (event_child->suspended >= 0);
2499 }
2500 }
2501
2502 if (event_child->collecting_fast_tracepoint == 0)
2503 {
2504 if (debug_threads)
2505 fprintf (stderr,
2506 "fast tracepoint finished "
2507 "collecting successfully.\n");
2508
2509 /* We may have a deferred signal to report. */
2510 if (dequeue_one_deferred_signal (event_child, &w))
2511 {
2512 if (debug_threads)
2513 fprintf (stderr, "dequeued one signal.\n");
2514 }
2515 else
2516 {
2517 if (debug_threads)
2518 fprintf (stderr, "no deferred signals.\n");
2519
2520 if (stabilizing_threads)
2521 {
2522 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2523 ourstatus->value.sig = GDB_SIGNAL_0;
2524 return ptid_of (event_child);
2525 }
2526 }
2527 }
2528 }
2529
2530 /* Check whether GDB would be interested in this event. */
2531
2532 /* If GDB is not interested in this signal, don't stop other
2533 threads, and don't report it to GDB. Just resume the inferior
2534 right away. We do this for threading-related signals as well as
2535 any that GDB specifically requested we ignore. But never ignore
2536 SIGSTOP if we sent it ourselves, and do not ignore signals when
2537 stepping - they may require special handling to skip the signal
2538 handler. */
2539 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2540 thread library? */
2541 if (WIFSTOPPED (w)
2542 && current_inferior->last_resume_kind != resume_step
2543 && (
2544#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2545 (current_process ()->private->thread_db != NULL
2546 && (WSTOPSIG (w) == __SIGRTMIN
2547 || WSTOPSIG (w) == __SIGRTMIN + 1))
2548 ||
2549#endif
2550 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2551 && !(WSTOPSIG (w) == SIGSTOP
2552 && current_inferior->last_resume_kind == resume_stop))))
2553 {
2554 siginfo_t info, *info_p;
2555
2556 if (debug_threads)
2557 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2558 WSTOPSIG (w), lwpid_of (event_child));
2559
2560 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2561 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2562 info_p = &info;
2563 else
2564 info_p = NULL;
2565 linux_resume_one_lwp (event_child, event_child->stepping,
2566 WSTOPSIG (w), info_p);
2567 goto retry;
2568 }
2569
2570 /* Note that all addresses are always "out of the step range" when
2571 there's no range to begin with. */
2572 in_step_range = lwp_in_step_range (event_child);
2573
2574 /* If GDB wanted this thread to single step, and the thread is out
2575 of the step range, we always want to report the SIGTRAP, and let
2576 GDB handle it. Watchpoints should always be reported. So should
2577 signals we can't explain. A SIGTRAP we can't explain could be a
2578     GDB breakpoint --- we may or may not support Z0 breakpoints.  If
2579     we do, we'd be able to handle GDB breakpoints on top of internal
2580     breakpoints, by handling the internal breakpoint and still
2581     reporting the event to GDB.  If we don't, we're out of luck; GDB
2582     won't see the breakpoint hit.  */
2583 report_to_gdb = (!maybe_internal_trap
2584 || (current_inferior->last_resume_kind == resume_step
2585 && !in_step_range)
2586 || event_child->stopped_by_watchpoint
2587 || (!step_over_finished && !in_step_range
2588 && !bp_explains_trap && !trace_event)
2589 || (gdb_breakpoint_here (event_child->stop_pc)
2590 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2591 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2592
2593 run_breakpoint_commands (event_child->stop_pc);
2594
2595 /* We found no reason GDB would want us to stop. We either hit one
2596 of our own breakpoints, or finished an internal step GDB
2597 shouldn't know about. */
2598 if (!report_to_gdb)
2599 {
2600 if (debug_threads)
2601 {
2602 if (bp_explains_trap)
2603 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2604 if (step_over_finished)
2605 fprintf (stderr, "Step-over finished.\n");
2606 if (trace_event)
2607 fprintf (stderr, "Tracepoint event.\n");
2608 if (lwp_in_step_range (event_child))
2609 fprintf (stderr, "Range stepping pc 0x%s [0x%s, 0x%s).\n",
2610 paddress (event_child->stop_pc),
2611 paddress (event_child->step_range_start),
2612 paddress (event_child->step_range_end));
2613 }
2614
2615 /* We're not reporting this breakpoint to GDB, so apply the
2616 decr_pc_after_break adjustment to the inferior's regcache
2617 ourselves. */
2618
2619 if (the_low_target.set_pc != NULL)
2620 {
2621 struct regcache *regcache
2622 = get_thread_regcache (get_lwp_thread (event_child), 1);
2623 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2624 }
2625
2626 /* We may have finished stepping over a breakpoint. If so,
2627 we've stopped and suspended all LWPs momentarily except the
2628 stepping one. This is where we resume them all again. We're
2629 going to keep waiting, so use proceed, which handles stepping
2630 over the next breakpoint. */
2631 if (debug_threads)
2632 fprintf (stderr, "proceeding all threads.\n");
2633
2634 if (step_over_finished)
2635 unsuspend_all_lwps (event_child);
2636
2637 proceed_all_lwps ();
2638 goto retry;
2639 }
2640
2641 if (debug_threads)
2642 {
2643 if (current_inferior->last_resume_kind == resume_step)
2644 {
2645 if (event_child->step_range_start == event_child->step_range_end)
2646 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2647 else if (!lwp_in_step_range (event_child))
2648 fprintf (stderr, "Out of step range, reporting event.\n");
2649 }
2650 if (event_child->stopped_by_watchpoint)
2651 fprintf (stderr, "Stopped by watchpoint.\n");
2652 if (gdb_breakpoint_here (event_child->stop_pc))
2653 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2654      /* And note the trap event itself.  */
2655      fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2656 }
2657
2658 /* Alright, we're going to report a stop. */
2659
2660 if (!non_stop && !stabilizing_threads)
2661 {
2662 /* In all-stop, stop all threads. */
2663 stop_all_lwps (0, NULL);
2664
2665 /* If we're not waiting for a specific LWP, choose an event LWP
2666 from among those that have had events. Giving equal priority
2667 to all LWPs that have had events helps prevent
2668 starvation. */
2669 if (ptid_equal (ptid, minus_one_ptid))
2670 {
2671 event_child->status_pending_p = 1;
2672 event_child->status_pending = w;
2673
2674 select_event_lwp (&event_child);
2675
2676 event_child->status_pending_p = 0;
2677 w = event_child->status_pending;
2678 }
2679
2680 /* Now that we've selected our final event LWP, cancel any
2681 breakpoints in other LWPs that have hit a GDB breakpoint.
2682 See the comment in cancel_breakpoints_callback to find out
2683 why. */
2684 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2685
2686      /* If we were doing a step-over, all other threads but the stepping one
2687 had been paused in start_step_over, with their suspend counts
2688 incremented. We don't want to do a full unstop/unpause, because we're
2689 in all-stop mode (so we want threads stopped), but we still need to
2690 unsuspend the other threads, to decrement their `suspended' count
2691 back. */
2692 if (step_over_finished)
2693 unsuspend_all_lwps (event_child);
2694
2695 /* Stabilize threads (move out of jump pads). */
2696 stabilize_threads ();
2697 }
2698 else
2699 {
2700 /* If we just finished a step-over, then all threads had been
2701 momentarily paused. In all-stop, that's fine, we want
2702 threads stopped by now anyway. In non-stop, we need to
2703 re-resume threads that GDB wanted to be running. */
2704 if (step_over_finished)
2705 unstop_all_lwps (1, event_child);
2706 }
2707
2708 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2709
2710 if (current_inferior->last_resume_kind == resume_stop
2711 && WSTOPSIG (w) == SIGSTOP)
2712 {
2713      /* GDB requested this thread to stop with vCont;t, and it
2714         stopped cleanly, so report it as stopped with SIG0.  The use
2715         of SIGSTOP is an implementation detail.  */
2716 ourstatus->value.sig = GDB_SIGNAL_0;
2717 }
2718 else if (current_inferior->last_resume_kind == resume_stop
2719 && WSTOPSIG (w) != SIGSTOP)
2720 {
2721      /* GDB requested this thread to stop with vCont;t, but it
2722         stopped for some other reason; report that signal.  */
2723 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2724 }
2725 else
2726 {
2727 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2728 }
2729
2730 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2731
2732 if (debug_threads)
2733 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2734 target_pid_to_str (ptid_of (event_child)),
2735 ourstatus->kind,
2736 ourstatus->value.sig);
2737
2738 return ptid_of (event_child);
2739}
2740
2741/* Get rid of any pending event in the pipe. */
2742static void
2743async_file_flush (void)
2744{
2745 int ret;
2746 char buf;
2747
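  /* The event pipe is presumably non-blocking, so once it is drained,
     read returns -1 with EAGAIN and the loop terminates.  */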
2748 do
2749 ret = read (linux_event_pipe[0], &buf, 1);
2750 while (ret >= 0 || (ret == -1 && errno == EINTR));
2751}
2752
2753/* Put something in the pipe, so the event loop wakes up. */
2754static void
2755async_file_mark (void)
2756{
2757 int ret;
2758
2759 async_file_flush ();
2760
2761 do
2762 ret = write (linux_event_pipe[1], "+", 1);
2763 while (ret == 0 || (ret == -1 && errno == EINTR));
2764
2765 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2766 be awakened anyway. */
2767}
2768
2769static ptid_t
2770linux_wait (ptid_t ptid,
2771 struct target_waitstatus *ourstatus, int target_options)
2772{
2773 ptid_t event_ptid;
2774
2775 if (debug_threads)
2776 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2777
2778 /* Flush the async file first. */
2779 if (target_is_async_p ())
2780 async_file_flush ();
2781
2782 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2783
2784 /* If at least one stop was reported, there may be more. A single
2785 SIGCHLD can signal more than one child stop. */
2786 if (target_is_async_p ()
2787 && (target_options & TARGET_WNOHANG) != 0
2788 && !ptid_equal (event_ptid, null_ptid))
2789 async_file_mark ();
2790
2791 return event_ptid;
2792}
2793
2794/* Send a signal to an LWP. */
2795
2796static int
2797kill_lwp (unsigned long lwpid, int signo)
2798{
2799 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2800 fails, then we are not using nptl threads and we should be using kill. */
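  /* tkill directs the signal at one specific kernel thread, which is
     what we want here; plain kill is only an adequate fallback on
     LinuxThreads, where each thread has its own PID.  */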
2801
2802#ifdef __NR_tkill
2803 {
2804 static int tkill_failed;
2805
2806 if (!tkill_failed)
2807 {
2808 int ret;
2809
2810 errno = 0;
2811 ret = syscall (__NR_tkill, lwpid, signo);
2812 if (errno != ENOSYS)
2813 return ret;
2814 tkill_failed = 1;
2815 }
2816 }
2817#endif
2818
2819 return kill (lwpid, signo);
2820}
2821
2822void
2823linux_stop_lwp (struct lwp_info *lwp)
2824{
2825 send_sigstop (lwp);
2826}
2827
2828static void
2829send_sigstop (struct lwp_info *lwp)
2830{
2831 int pid;
2832
2833 pid = lwpid_of (lwp);
2834
2835 /* If we already have a pending stop signal for this process, don't
2836 send another. */
2837 if (lwp->stop_expected)
2838 {
2839 if (debug_threads)
2840 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2841
2842 return;
2843 }
2844
2845 if (debug_threads)
2846 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2847
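  /* Mark the stop as expected before sending the signal, so the wait
     code can tell this SIGSTOP was ours rather than the user's.  */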
2848 lwp->stop_expected = 1;
2849 kill_lwp (pid, SIGSTOP);
2850}
2851
2852static int
2853send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2854{
2855 struct lwp_info *lwp = (struct lwp_info *) entry;
2856
2857 /* Ignore EXCEPT. */
2858 if (lwp == except)
2859 return 0;
2860
2861 if (lwp->stopped)
2862 return 0;
2863
2864 send_sigstop (lwp);
2865 return 0;
2866}
2867
2868/* Increment the suspend count of an LWP, and stop it, if not stopped
2869 yet. */
2870static int
2871suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2872 void *except)
2873{
2874 struct lwp_info *lwp = (struct lwp_info *) entry;
2875
2876 /* Ignore EXCEPT. */
2877 if (lwp == except)
2878 return 0;
2879
2880 lwp->suspended++;
2881
2882 return send_sigstop_callback (entry, except);
2883}
2884
2885static void
2886mark_lwp_dead (struct lwp_info *lwp, int wstat)
2887{
2888 /* It's dead, really. */
2889 lwp->dead = 1;
2890
2891 /* Store the exit status for later. */
2892 lwp->status_pending_p = 1;
2893 lwp->status_pending = wstat;
2894
2895 /* Prevent trying to stop it. */
2896 lwp->stopped = 1;
2897
2898 /* No further stops are expected from a dead lwp. */
2899 lwp->stop_expected = 0;
2900}
2901
2902static void
2903wait_for_sigstop (struct inferior_list_entry *entry)
2904{
2905 struct lwp_info *lwp = (struct lwp_info *) entry;
2906 struct thread_info *saved_inferior;
2907 int wstat;
2908 ptid_t saved_tid;
2909 ptid_t ptid;
2910 int pid;
2911
2912 if (lwp->stopped)
2913 {
2914 if (debug_threads)
2915 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2916 lwpid_of (lwp));
2917 return;
2918 }
2919
2920 saved_inferior = current_inferior;
2921 if (saved_inferior != NULL)
2922 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2923 else
2924 saved_tid = null_ptid; /* avoid bogus unused warning */
2925
2926 ptid = lwp->head.id;
2927
2928 if (debug_threads)
2929 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2930
2931 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2932
2933 /* If we stopped with a non-SIGSTOP signal, save it for later
2934 and record the pending SIGSTOP. If the process exited, just
2935 return. */
2936 if (WIFSTOPPED (wstat))
2937 {
2938 if (debug_threads)
2939 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2940 lwpid_of (lwp), WSTOPSIG (wstat));
2941
2942 if (WSTOPSIG (wstat) != SIGSTOP)
2943 {
2944 if (debug_threads)
2945 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2946 lwpid_of (lwp), wstat);
2947
2948 lwp->status_pending_p = 1;
2949 lwp->status_pending = wstat;
2950 }
2951 }
2952 else
2953 {
2954 if (debug_threads)
2955 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2956
2957 lwp = find_lwp_pid (pid_to_ptid (pid));
2958 if (lwp)
2959 {
2960 /* Leave this status pending for the next time we're able to
2961	     report it.  In the meantime, we'll report this lwp as
2962 dead to GDB, so GDB doesn't try to read registers and
2963 memory from it. This can only happen if this was the
2964 last thread of the process; otherwise, PID is removed
2965 from the thread tables before linux_wait_for_event
2966 returns. */
2967 mark_lwp_dead (lwp, wstat);
2968 }
2969 }
2970
2971 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2972 current_inferior = saved_inferior;
2973 else
2974 {
2975 if (debug_threads)
2976 fprintf (stderr, "Previously current thread died.\n");
2977
2978 if (non_stop)
2979 {
2980 /* We can't change the current inferior behind GDB's back,
2981 otherwise, a subsequent command may apply to the wrong
2982 process. */
2983 current_inferior = NULL;
2984 }
2985 else
2986 {
2987 /* Set a valid thread as current. */
2988 set_desired_inferior (0);
2989 }
2990 }
2991}
2992
2993/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2994 move it out, because we need to report the stop event to GDB. For
2995 example, if the user puts a breakpoint in the jump pad, it's
2996 because she wants to debug it. */
2997
2998static int
2999stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3000{
3001 struct lwp_info *lwp = (struct lwp_info *) entry;
3002 struct thread_info *thread = get_lwp_thread (lwp);
3003
3004 gdb_assert (lwp->suspended == 0);
3005 gdb_assert (lwp->stopped);
3006
3007 /* Allow debugging the jump pad, gdb_collect, etc.. */
3008 return (supports_fast_tracepoints ()
3009 && agent_loaded_p ()
3010 && (gdb_breakpoint_here (lwp->stop_pc)
3011 || lwp->stopped_by_watchpoint
3012 || thread->last_resume_kind == resume_step)
3013 && linux_fast_tracepoint_collecting (lwp, NULL));
3014}
3015
3016static void
3017move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3018{
3019 struct lwp_info *lwp = (struct lwp_info *) entry;
3020 struct thread_info *thread = get_lwp_thread (lwp);
3021 int *wstat;
3022
3023 gdb_assert (lwp->suspended == 0);
3024 gdb_assert (lwp->stopped);
3025
3026 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3027
3028 /* Allow debugging the jump pad, gdb_collect, etc. */
3029 if (!gdb_breakpoint_here (lwp->stop_pc)
3030 && !lwp->stopped_by_watchpoint
3031 && thread->last_resume_kind != resume_step
3032 && maybe_move_out_of_jump_pad (lwp, wstat))
3033 {
3034 if (debug_threads)
3035 fprintf (stderr,
3036 "LWP %ld needs stabilizing (in jump pad)\n",
3037 lwpid_of (lwp));
3038
3039 if (wstat)
3040 {
3041 lwp->status_pending_p = 0;
3042 enqueue_one_deferred_signal (lwp, wstat);
3043
3044 if (debug_threads)
3045 fprintf (stderr,
3046 "Signal %d for LWP %ld deferred "
3047 "(in jump pad)\n",
3048 WSTOPSIG (*wstat), lwpid_of (lwp));
3049 }
3050
3051 linux_resume_one_lwp (lwp, 0, 0, NULL);
3052 }
3053 else
3054 lwp->suspended++;
3055}
3056
3057static int
3058lwp_running (struct inferior_list_entry *entry, void *data)
3059{
3060 struct lwp_info *lwp = (struct lwp_info *) entry;
3061
3062 if (lwp->dead)
3063 return 0;
3064 if (lwp->stopped)
3065 return 0;
3066 return 1;
3067}
3068
3069/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3070 If SUSPEND, then also increase the suspend count of every LWP,
3071 except EXCEPT. */
3072
3073static void
3074stop_all_lwps (int suspend, struct lwp_info *except)
3075{
3076 /* Should not be called recursively. */
3077 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3078
3079 stopping_threads = (suspend
3080 ? STOPPING_AND_SUSPENDING_THREADS
3081 : STOPPING_THREADS);
3082
3083 if (suspend)
3084 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3085 else
3086 find_inferior (&all_lwps, send_sigstop_callback, except);
3087 for_each_inferior (&all_lwps, wait_for_sigstop);
3088 stopping_threads = NOT_STOPPING_THREADS;
3089}
3090
3091/* Resume execution of the inferior process.
3092 If STEP is nonzero, single-step it.
3093 If SIGNAL is nonzero, give it that signal. */
3094
3095static void
3096linux_resume_one_lwp (struct lwp_info *lwp,
3097 int step, int signal, siginfo_t *info)
3098{
3099 struct thread_info *saved_inferior;
3100 int fast_tp_collecting;
3101
3102 if (lwp->stopped == 0)
3103 return;
3104
3105 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3106
3107 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3108
3109 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3110 user used the "jump" command, or "set $pc = foo"). */
3111 if (lwp->stop_pc != get_pc (lwp))
3112 {
3113 /* Collecting 'while-stepping' actions doesn't make sense
3114 anymore. */
3115 release_while_stepping_state_list (get_lwp_thread (lwp));
3116 }
3117
3118 /* If we have pending signals or status, and a new signal, enqueue the
3119 signal. Also enqueue the signal if we are waiting to reinsert a
3120 breakpoint; it will be picked up again below. */
3121 if (signal != 0
3122 && (lwp->status_pending_p
3123 || lwp->pending_signals != NULL
3124 || lwp->bp_reinsert != 0
3125 || fast_tp_collecting))
3126 {
3127 struct pending_signals *p_sig;
3128 p_sig = xmalloc (sizeof (*p_sig));
3129 p_sig->prev = lwp->pending_signals;
3130 p_sig->signal = signal;
3131 if (info == NULL)
3132 memset (&p_sig->info, 0, sizeof (siginfo_t));
3133 else
3134 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3135 lwp->pending_signals = p_sig;
3136 }
3137
3138 if (lwp->status_pending_p)
3139 {
3140 if (debug_threads)
3141 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3142 " has pending status\n",
3143 lwpid_of (lwp), step ? "step" : "continue", signal,
3144 lwp->stop_expected ? "expected" : "not expected");
3145 return;
3146 }
3147
3148 saved_inferior = current_inferior;
3149 current_inferior = get_lwp_thread (lwp);
3150
3151 if (debug_threads)
3152 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3153 lwpid_of (lwp), step ? "step" : "continue", signal,
3154 lwp->stop_expected ? "expected" : "not expected");
3155
3156 /* This bit needs some thinking about. If we get a signal that
3157 we must report while a single-step reinsert is still pending,
3158 we often end up resuming the thread. It might be better to
3159 (ew) allow a stack of pending events; then we could be sure that
3160 the reinsert happened right away and not lose any signals.
3161
3162 Making this stack would also shrink the window in which breakpoints are
3163 uninserted (see comment in linux_wait_for_lwp) but not enough for
3164 complete correctness, so it won't solve that problem. It may be
3165 worthwhile just to solve this one, however. */
3166 if (lwp->bp_reinsert != 0)
3167 {
3168 if (debug_threads)
3169 fprintf (stderr, " pending reinsert at 0x%s\n",
3170 paddress (lwp->bp_reinsert));
3171
3172 if (can_hardware_single_step ())
3173 {
3174 if (fast_tp_collecting == 0)
3175 {
3176 if (step == 0)
3177 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3178 if (lwp->suspended)
3179 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3180 lwp->suspended);
3181 }
3182
3183 step = 1;
3184 }
3185
3186 /* Postpone any pending signal. It was enqueued above. */
3187 signal = 0;
3188 }
3189
3190 if (fast_tp_collecting == 1)
3191 {
3192 if (debug_threads)
3193 fprintf (stderr, "\
3194lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3195 lwpid_of (lwp));
3196
3197 /* Postpone any pending signal. It was enqueued above. */
3198 signal = 0;
3199 }
3200 else if (fast_tp_collecting == 2)
3201 {
3202 if (debug_threads)
3203 fprintf (stderr, "\
3204lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3205 lwpid_of (lwp));
3206
3207 if (can_hardware_single_step ())
3208 step = 1;
3209 else
3210 fatal ("moving out of jump pad single-stepping"
3211 " not implemented on this target");
3212
3213 /* Postpone any pending signal. It was enqueued above. */
3214 signal = 0;
3215 }
3216
3217  /* If we have while-stepping actions in this thread, set it stepping.
3218     If we have a signal to deliver, it may or may not be set to
3219     SIG_IGN; we don't know.  Assume so, and allow collecting
3220 while-stepping into a signal handler. A possible smart thing to
3221 do would be to set an internal breakpoint at the signal return
3222 address, continue, and carry on catching this while-stepping
3223 action only when that breakpoint is hit. A future
3224 enhancement. */
3225 if (get_lwp_thread (lwp)->while_stepping != NULL
3226 && can_hardware_single_step ())
3227 {
3228 if (debug_threads)
3229 fprintf (stderr,
3230 "lwp %ld has a while-stepping action -> forcing step.\n",
3231 lwpid_of (lwp));
3232 step = 1;
3233 }
3234
3235 if (debug_threads && the_low_target.get_pc != NULL)
3236 {
3237 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3238 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3239 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3240 }
3241
3242 /* If we have pending signals, consume one unless we are trying to
3243 reinsert a breakpoint or we're trying to finish a fast tracepoint
3244 collect. */
3245 if (lwp->pending_signals != NULL
3246 && lwp->bp_reinsert == 0
3247 && fast_tp_collecting == 0)
3248 {
3249 struct pending_signals **p_sig;
3250
3251 p_sig = &lwp->pending_signals;
3252 while ((*p_sig)->prev != NULL)
3253 p_sig = &(*p_sig)->prev;
3254
3255 signal = (*p_sig)->signal;
3256 if ((*p_sig)->info.si_signo != 0)
3257 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3258 &(*p_sig)->info);
3259
3260 free (*p_sig);
3261 *p_sig = NULL;
3262 }
3263
3264 if (the_low_target.prepare_to_resume != NULL)
3265 the_low_target.prepare_to_resume (lwp);
3266
3267 regcache_invalidate_thread (get_lwp_thread (lwp));
3268 errno = 0;
3269 lwp->stopped = 0;
3270 lwp->stopped_by_watchpoint = 0;
3271 lwp->stepping = step;
3272 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3273 (PTRACE_TYPE_ARG3) 0,
3274 /* Coerce to a uintptr_t first to avoid potential gcc warning
3275 of coercing an 8 byte integer to a 4 byte pointer. */
3276 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3277
3278 current_inferior = saved_inferior;
3279 if (errno)
3280 {
3281 /* ESRCH from ptrace either means that the thread was already
3282 running (an error) or that it is gone (a race condition). If
3283 it's gone, we will get a notification the next time we wait,
3284 so we can ignore the error. We could differentiate these
3285 two, but it's tricky without waiting; the thread still exists
3286 as a zombie, so sending it signal 0 would succeed. So just
3287 ignore ESRCH. */
3288 if (errno == ESRCH)
3289 return;
3290
3291 perror_with_name ("ptrace");
3292 }
3293}
3294
3295struct thread_resume_array
3296{
3297 struct thread_resume *resume;
3298 size_t n;
3299};
3300
3301/* This function is called once per thread. We look up the thread
3302 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3303 resume request.
3304
3305   This algorithm is O(threads * resume elements), but the number of
3306   resume elements is small (and will remain small at least until GDB
3307   supports thread suspension).  */
3308static int
3309linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3310{
3311 struct lwp_info *lwp;
3312 struct thread_info *thread;
3313 int ndx;
3314 struct thread_resume_array *r;
3315
3316 thread = (struct thread_info *) entry;
3317 lwp = get_thread_lwp (thread);
3318 r = arg;
3319
3320 for (ndx = 0; ndx < r->n; ndx++)
3321 {
3322 ptid_t ptid = r->resume[ndx].thread;
3323 if (ptid_equal (ptid, minus_one_ptid)
3324 || ptid_equal (ptid, entry->id)
3325 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3326 of PID'. */
3327 || (ptid_get_pid (ptid) == pid_of (lwp)
3328 && (ptid_is_pid (ptid)
3329 || ptid_get_lwp (ptid) == -1)))
3330 {
3331 if (r->resume[ndx].kind == resume_stop
3332 && thread->last_resume_kind == resume_stop)
3333 {
3334 if (debug_threads)
3335 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3336 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3337 ? "stopped"
3338 : "stopping",
3339 lwpid_of (lwp));
3340
3341 continue;
3342 }
3343
3344 lwp->resume = &r->resume[ndx];
3345 thread->last_resume_kind = lwp->resume->kind;
3346
3347 lwp->step_range_start = lwp->resume->step_range_start;
3348 lwp->step_range_end = lwp->resume->step_range_end;
3349
3350 /* If we had a deferred signal to report, dequeue one now.
3351 This can happen if LWP gets more than one signal while
3352 trying to get out of a jump pad. */
3353 if (lwp->stopped
3354 && !lwp->status_pending_p
3355 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3356 {
3357 lwp->status_pending_p = 1;
3358
3359 if (debug_threads)
3360 fprintf (stderr,
3361 "Dequeueing deferred signal %d for LWP %ld, "
3362 "leaving status pending.\n",
3363 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3364 }
3365
3366 return 0;
3367 }
3368 }
3369
3370 /* No resume action for this thread. */
3371 lwp->resume = NULL;
3372
3373 return 0;
3374}
3375
3376
3377/* Set *FLAG_P if this lwp has an interesting status pending. */
3378static int
3379resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3380{
3381 struct lwp_info *lwp = (struct lwp_info *) entry;
3382
3383 /* LWPs which will not be resumed are not interesting, because
3384 we might not wait for them next time through linux_wait. */
3385 if (lwp->resume == NULL)
3386 return 0;
3387
3388 if (lwp->status_pending_p)
3389 * (int *) flag_p = 1;
3390
3391 return 0;
3392}
3393
3394/* Return 1 if this lwp that GDB wants running is stopped at an
3395 internal breakpoint that we need to step over. It assumes that any
3396 required STOP_PC adjustment has already been propagated to the
3397 inferior's regcache. */
3398
3399static int
3400need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3401{
3402 struct lwp_info *lwp = (struct lwp_info *) entry;
3403 struct thread_info *thread;
3404 struct thread_info *saved_inferior;
3405 CORE_ADDR pc;
3406
3407 /* LWPs which will not be resumed are not interesting, because we
3408 might not wait for them next time through linux_wait. */
3409
3410 if (!lwp->stopped)
3411 {
3412 if (debug_threads)
3413 fprintf (stderr,
3414 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3415 lwpid_of (lwp));
3416 return 0;
3417 }
3418
3419 thread = get_lwp_thread (lwp);
3420
3421 if (thread->last_resume_kind == resume_stop)
3422 {
3423 if (debug_threads)
3424 fprintf (stderr,
3425 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3426 lwpid_of (lwp));
3427 return 0;
3428 }
3429
3430 gdb_assert (lwp->suspended >= 0);
3431
3432 if (lwp->suspended)
3433 {
3434 if (debug_threads)
3435 fprintf (stderr,
3436 "Need step over [LWP %ld]? Ignoring, suspended\n",
3437 lwpid_of (lwp));
3438 return 0;
3439 }
3440
3441 if (!lwp->need_step_over)
3442 {
3443 if (debug_threads)
3444 fprintf (stderr,
3445 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3446 }
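  /* Note there is no early return here: even when NEED_STEP_OVER is
     clear, we still check below whether a breakpoint sits at the
     current PC.  */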
3447
3448 if (lwp->status_pending_p)
3449 {
3450 if (debug_threads)
3451 fprintf (stderr,
3452 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3453 lwpid_of (lwp));
3454 return 0;
3455 }
3456
3457 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3458 or we have. */
3459 pc = get_pc (lwp);
3460
3461 /* If the PC has changed since we stopped, then don't do anything,
3462 and let the breakpoint/tracepoint be hit. This happens if, for
3463 instance, GDB handled the decr_pc_after_break subtraction itself,
3464 GDB is OOL stepping this thread, or the user has issued a "jump"
3465     command, or poked the thread's registers herself.  */
3466 if (pc != lwp->stop_pc)
3467 {
3468 if (debug_threads)
3469 fprintf (stderr,
3470 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3471 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3472 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3473
3474 lwp->need_step_over = 0;
3475 return 0;
3476 }
3477
3478 saved_inferior = current_inferior;
3479 current_inferior = thread;
3480
3481 /* We can only step over breakpoints we know about. */
3482 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3483 {
3484      /* Don't step over a breakpoint that GDB expects to hit,
3485         though.  If the condition is being evaluated on the target's
3486         side and it evaluates to false, step over it as well.  */
3487 if (gdb_breakpoint_here (pc)
3488 && gdb_condition_true_at_breakpoint (pc)
3489 && gdb_no_commands_at_breakpoint (pc))
3490 {
3491 if (debug_threads)
3492 fprintf (stderr,
3493 "Need step over [LWP %ld]? yes, but found"
3494 " GDB breakpoint at 0x%s; skipping step over\n",
3495 lwpid_of (lwp), paddress (pc));
3496
3497 current_inferior = saved_inferior;
3498 return 0;
3499 }
3500 else
3501 {
3502 if (debug_threads)
3503 fprintf (stderr,
3504 "Need step over [LWP %ld]? yes, "
3505 "found breakpoint at 0x%s\n",
3506 lwpid_of (lwp), paddress (pc));
3507
3508 /* We've found an lwp that needs stepping over --- return 1 so
3509 that find_inferior stops looking. */
3510 current_inferior = saved_inferior;
3511
3512 /* If the step over is cancelled, this is set again. */
3513 lwp->need_step_over = 0;
3514 return 1;
3515 }
3516 }
3517
3518 current_inferior = saved_inferior;
3519
3520 if (debug_threads)
3521 fprintf (stderr,
3522 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3523 lwpid_of (lwp), paddress (pc));
3524
3525 return 0;
3526}
3527
3528/* Start a step-over operation on LWP.  When LWP is stopped at a
3529   breakpoint, to make progress we need to move the breakpoint out
3530   of the way.  If we let other threads run while we do that, they may
3531   pass by the breakpoint location and miss hitting it.  To avoid
3532   that, a step-over momentarily stops all threads while LWP is
3533   single-stepped with the breakpoint temporarily uninserted from
3534   the inferior.  When the single-step finishes, we reinsert the
3535   breakpoint, and let all threads that are supposed to be running
3536   run again.
3537
3538 On targets that don't support hardware single-step, we don't
3539 currently support full software single-stepping. Instead, we only
3540 support stepping over the thread event breakpoint, by asking the
3541 low target where to place a reinsert breakpoint. Since this
3542 routine assumes the breakpoint being stepped over is a thread event
3543 breakpoint, it usually assumes the return address of the current
3544 function is a good enough place to set the reinsert breakpoint. */
3545
3546static int
3547start_step_over (struct lwp_info *lwp)
3548{
3549 struct thread_info *saved_inferior;
3550 CORE_ADDR pc;
3551 int step;
3552
3553 if (debug_threads)
3554 fprintf (stderr,
3555 "Starting step-over on LWP %ld. Stopping all threads\n",
3556 lwpid_of (lwp));
3557
3558 stop_all_lwps (1, lwp);
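  /* stop_all_lwps suspended every LWP except LWP itself, so its own
     suspend count should still be zero.  */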
3559 gdb_assert (lwp->suspended == 0);
3560
3561 if (debug_threads)
3562 fprintf (stderr, "Done stopping all threads for step-over.\n");
3563
3564 /* Note, we should always reach here with an already adjusted PC,
3565 either by GDB (if we're resuming due to GDB's request), or by our
3566 caller, if we just finished handling an internal breakpoint GDB
3567 shouldn't care about. */
3568 pc = get_pc (lwp);
3569
3570 saved_inferior = current_inferior;
3571 current_inferior = get_lwp_thread (lwp);
3572
3573 lwp->bp_reinsert = pc;
3574 uninsert_breakpoints_at (pc);
3575 uninsert_fast_tracepoint_jumps_at (pc);
3576
3577 if (can_hardware_single_step ())
3578 {
3579 step = 1;
3580 }
3581 else
3582 {
3583 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3584 set_reinsert_breakpoint (raddr);
3585 step = 0;
3586 }
3587
3588 current_inferior = saved_inferior;
3589
3590 linux_resume_one_lwp (lwp, step, 0, NULL);
3591
3592 /* Require next event from this LWP. */
3593 step_over_bkpt = lwp->head.id;
3594 return 1;
3595}
3596
3597/* Finish a step-over. Reinsert the breakpoint we had uninserted in
3598 start_step_over, if still there, and delete any reinsert
3599 breakpoints we've set, on non hardware single-step targets. */
3600
3601static int
3602finish_step_over (struct lwp_info *lwp)
3603{
3604 if (lwp->bp_reinsert != 0)
3605 {
3606 if (debug_threads)
3607 fprintf (stderr, "Finished step over.\n");
3608
3609 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3610 may be no breakpoint to reinsert there by now. */
3611 reinsert_breakpoints_at (lwp->bp_reinsert);
3612 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3613
3614 lwp->bp_reinsert = 0;
3615
3616 /* Delete any software-single-step reinsert breakpoints. No
3617 longer needed. We don't have to worry about other threads
3618 hitting this trap, and later not being able to explain it,
3619 because we were stepping over a breakpoint, and we hold all
3620 threads but LWP stopped while doing that. */
3621 if (!can_hardware_single_step ())
3622 delete_reinsert_breakpoints ();
3623
3624 step_over_bkpt = null_ptid;
3625 return 1;
3626 }
3627 else
3628 return 0;
3629}
3630
3631/* This function is called once per thread. We check the thread's resume
3632 request, which will tell us whether to resume, step, or leave the thread
3633 stopped; and what signal, if any, it should be sent.
3634
3635 For threads which we aren't explicitly told otherwise, we preserve
3636 the stepping flag; this is used for stepping over gdbserver-placed
3637 breakpoints.
3638
3639 If pending_flags was set in any thread, we queue any needed
3640 signals, since we won't actually resume. We already have a pending
3641 event to report, so we don't need to preserve any step requests;
3642 they should be re-issued if necessary. */
3643
3644static int
3645linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3646{
3647 struct lwp_info *lwp;
3648 struct thread_info *thread;
3649 int step;
3650 int leave_all_stopped = * (int *) arg;
3651 int leave_pending;
3652
3653 thread = (struct thread_info *) entry;
3654 lwp = get_thread_lwp (thread);
3655
3656 if (lwp->resume == NULL)
3657 return 0;
3658
3659 if (lwp->resume->kind == resume_stop)
3660 {
3661 if (debug_threads)
3662 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3663
3664 if (!lwp->stopped)
3665 {
3666 if (debug_threads)
3667 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3668
3669 /* Stop the thread, and wait for the event asynchronously,
3670 through the event loop. */
3671 send_sigstop (lwp);
3672 }
3673 else
3674 {
3675 if (debug_threads)
3676 fprintf (stderr, "already stopped LWP %ld\n",
3677 lwpid_of (lwp));
3678
3679 /* The LWP may have been stopped in an internal event that
3680 was not meant to be notified back to GDB (e.g., gdbserver
3681 breakpoint), so we should be reporting a stop event in
3682 this case too. */
3683
3684 /* If the thread already has a pending SIGSTOP, this is a
3685 no-op. Otherwise, something later will presumably resume
3686 the thread and this will cause it to cancel any pending
3687 operation, due to last_resume_kind == resume_stop. If
3688 the thread already has a pending status to report, we
3689 will still report it the next time we wait - see
3690 status_pending_p_callback. */
3691
3692 /* If we already have a pending signal to report, then
3693 there's no need to queue a SIGSTOP, as this means we're
3694 midway through moving the LWP out of the jumppad, and we
3695 will report the pending signal as soon as that is
3696 finished. */
3697 if (lwp->pending_signals_to_report == NULL)
3698 send_sigstop (lwp);
3699 }
3700
3701 /* For stop requests, we're done. */
3702 lwp->resume = NULL;
3703 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3704 return 0;
3705 }
3706
3707 /* If this thread which is about to be resumed has a pending status,
3708 then don't resume any threads - we can just report the pending
3709 status. Make sure to queue any signals that would otherwise be
3710 sent. In all-stop mode, we do this decision based on if *any*
3711 thread has a pending status. If there's a thread that needs the
3712 step-over-breakpoint dance, then don't resume any other thread
3713 but that particular one. */
3714 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3715
3716 if (!leave_pending)
3717 {
3718 if (debug_threads)
3719 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3720
3721 step = (lwp->resume->kind == resume_step);
3722 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3723 }
3724 else
3725 {
3726 if (debug_threads)
3727 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3728
3729 /* If we have a new signal, enqueue the signal. */
3730 if (lwp->resume->sig != 0)
3731 {
3732 struct pending_signals *p_sig;
3733 p_sig = xmalloc (sizeof (*p_sig));
3734 p_sig->prev = lwp->pending_signals;
3735 p_sig->signal = lwp->resume->sig;
3736 memset (&p_sig->info, 0, sizeof (siginfo_t));
3737
3738 /* If this is the same signal we were previously stopped by,
3739 make sure to queue its siginfo. We can ignore the return
3740 value of ptrace; if it fails, we'll skip
3741 PTRACE_SETSIGINFO. */
3742 if (WIFSTOPPED (lwp->last_status)
3743 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3744 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3745 &p_sig->info);
3746
3747 lwp->pending_signals = p_sig;
3748 }
3749 }
3750
3751 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3752 lwp->resume = NULL;
3753 return 0;
3754}
3755
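/* Resume execution of the inferior's threads as directed by the N
   entries of RESUME_INFO, typically built from a vCont request by the
   generic server code.  A pending status or a needed breakpoint
   step-over keeps everything stopped; see the comments below.  */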
3756static void
3757linux_resume (struct thread_resume *resume_info, size_t n)
3758{
3759 struct thread_resume_array array = { resume_info, n };
3760 struct lwp_info *need_step_over = NULL;
3761 int any_pending;
3762 int leave_all_stopped;
3763
3764 find_inferior (&all_threads, linux_set_resume_request, &array);
3765
3766 /* If there is a thread which would otherwise be resumed, which has
3767 a pending status, then don't resume any threads - we can just
3768 report the pending status. Make sure to queue any signals that
3769 would otherwise be sent. In non-stop mode, we'll apply this
3770 logic to each thread individually. We consume all pending events
3771 before considering to start a step-over (in all-stop). */
3772 any_pending = 0;
3773 if (!non_stop)
3774 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3775
3776 /* If there is a thread which would otherwise be resumed, which is
3777 stopped at a breakpoint that needs stepping over, then don't
3778 resume any threads - have it step over the breakpoint with all
3779 other threads stopped, then resume all threads again. Make sure
3780 to queue any signals that would otherwise be delivered or
3781 queued. */
3782 if (!any_pending && supports_breakpoints ())
3783 need_step_over
3784 = (struct lwp_info *) find_inferior (&all_lwps,
3785 need_step_over_p, NULL);
3786
3787 leave_all_stopped = (need_step_over != NULL || any_pending);
3788
3789 if (debug_threads)
3790 {
3791 if (need_step_over != NULL)
3792 fprintf (stderr, "Not resuming all, need step over\n");
3793 else if (any_pending)
3794 fprintf (stderr,
3795 "Not resuming, all-stop and found "
3796 "an LWP with pending status\n");
3797 else
3798 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3799 }
3800
3801 /* Even if we're leaving threads stopped, queue all signals we'd
3802 otherwise deliver. */
3803 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3804
3805 if (need_step_over)
3806 start_step_over (need_step_over);
3807}
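/* Illustrative usage sketch (not part of the original source): a
   caller asking this target to continue every thread, the moral
   equivalent of a "vCont;c" request, could build a single wildcard
   entry.  The real construction lives in the generic server code;
   the values below are assumptions for the example.

     struct thread_resume r;

     r.thread = minus_one_ptid;	(wildcard: apply to all threads)
     r.kind = resume_continue;	(continue, do not single-step)
     r.sig = 0;			(no signal to deliver)
     linux_resume (&r, 1);
*/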
3808
3809/* This function is called once per thread. We check the thread's
3810 last resume request, which will tell us whether to resume, step, or
3811 leave the thread stopped. Any signal the client requested to be
3812 delivered has already been enqueued at this point.
3813
3814 If any thread that GDB wants running is stopped at an internal
3815 breakpoint that needs stepping over, we start a step-over operation
3816 on that particular thread, and leave all others stopped. */
3817
3818static int
3819proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3820{
3821 struct lwp_info *lwp = (struct lwp_info *) entry;
3822 struct thread_info *thread;
3823 int step;
3824
3825 if (lwp == except)
3826 return 0;
3827
3828 if (debug_threads)
3829 fprintf (stderr,
3830 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3831
3832 if (!lwp->stopped)
3833 {
3834 if (debug_threads)
3835 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3836 return 0;
3837 }
3838
3839 thread = get_lwp_thread (lwp);
3840
3841 if (thread->last_resume_kind == resume_stop
3842 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3843 {
3844 if (debug_threads)
3845 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3846 lwpid_of (lwp));
3847 return 0;
3848 }
3849
3850 if (lwp->status_pending_p)
3851 {
3852 if (debug_threads)
3853 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3854 lwpid_of (lwp));
3855 return 0;
3856 }
3857
3858 gdb_assert (lwp->suspended >= 0);
3859
3860 if (lwp->suspended)
3861 {
3862 if (debug_threads)
3863 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3864 return 0;
3865 }
3866
3867 if (thread->last_resume_kind == resume_stop
3868 && lwp->pending_signals_to_report == NULL
3869 && lwp->collecting_fast_tracepoint == 0)
3870 {
3871 /* We haven't reported this LWP as stopped yet (otherwise, the
3872 last_status.kind check above would catch it, and we wouldn't
3873 reach here). This LWP may have been momentarily paused by a
3874 stop_all_lwps call while handling, for example, another LWP's
3875 step-over. In that case, the pending expected SIGSTOP signal
3876 that was queued at vCont;t handling time will have already
3877 been consumed by wait_for_sigstop, and so we need to requeue
3878 another one here. Note that if the LWP already has a SIGSTOP
3879 pending, this is a no-op. */
3880
3881 if (debug_threads)
3882 fprintf (stderr,
3883 "Client wants LWP %ld to stop. "
3884 "Making sure it has a SIGSTOP pending\n",
3885 lwpid_of (lwp));
3886
3887 send_sigstop (lwp);
3888 }
3889
3890 step = thread->last_resume_kind == resume_step;
3891 linux_resume_one_lwp (lwp, step, 0, NULL);
3892 return 0;
3893}
3894
3895static int
3896unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3897{
3898 struct lwp_info *lwp = (struct lwp_info *) entry;
3899
3900 if (lwp == except)
3901 return 0;
3902
3903 lwp->suspended--;
3904 gdb_assert (lwp->suspended >= 0);
3905
3906 return proceed_one_lwp (entry, except);
3907}
3908
3909/* When we finish a step-over, set threads running again. If there's
3910 another thread that may need a step-over, now's the time to start
3911 it. Eventually, we'll move all threads past their breakpoints. */
3912
3913static void
3914proceed_all_lwps (void)
3915{
3916 struct lwp_info *need_step_over;
3917
3918 /* If there is a thread which would otherwise be resumed, which is
3919 stopped at a breakpoint that needs stepping over, then don't
3920 resume any threads - have it step over the breakpoint with all
3921 other threads stopped, then resume all threads again. */
3922
3923 if (supports_breakpoints ())
3924 {
3925 need_step_over
3926 = (struct lwp_info *) find_inferior (&all_lwps,
3927 need_step_over_p, NULL);
3928
3929 if (need_step_over != NULL)
3930 {
3931 if (debug_threads)
3932 fprintf (stderr, "proceed_all_lwps: found "
3933 "thread %ld needing a step-over\n",
3934 lwpid_of (need_step_over));
3935
3936 start_step_over (need_step_over);
3937 return;
3938 }
3939 }
3940
3941 if (debug_threads)
3942 fprintf (stderr, "Proceeding, no step-over needed\n");
3943
3944 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3945}
3946
3947/* Stopped LWPs that the client wanted to be running, that don't have
3948 pending statuses, are set to run again, except for EXCEPT, if not
3949 NULL. This undoes a stop_all_lwps call. */
3950
3951static void
3952unstop_all_lwps (int unsuspend, struct lwp_info *except)
3953{
3954 if (debug_threads)
3955 {
3956 if (except)
3957 fprintf (stderr,
3958 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3959 else
3960 fprintf (stderr,
3961 "unstopping all lwps\n");
3962 }
3963
3964 if (unsuspend)
3965 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3966 else
3967 find_inferior (&all_lwps, proceed_one_lwp, except);
3968}
3969
3970
3971#ifdef HAVE_LINUX_REGSETS
3972
3973#define use_linux_regsets 1
3974
3975/* Returns true if REGSET has been disabled. */
3976
3977static int
3978regset_disabled (struct regsets_info *info, struct regset_info *regset)
3979{
3980 return (info->disabled_regsets != NULL
3981 && info->disabled_regsets[regset - info->regsets]);
3982}
3983
3984/* Disable REGSET. */
3985
3986static void
3987disable_regset (struct regsets_info *info, struct regset_info *regset)
3988{
3989 int dr_offset;
3990
3991 dr_offset = regset - info->regsets;
3992 if (info->disabled_regsets == NULL)
3993 info->disabled_regsets = xcalloc (1, info->num_regsets);
3994 info->disabled_regsets[dr_offset] = 1;
3995}
3996
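/* Fetch registers for the current inferior via every applicable
   kernel regset, storing them into REGCACHE.  Returns 0 if a
   GENERAL_REGS regset was transferred, 1 otherwise, in which case
   the caller falls back to fetching registers individually.  */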
3997static int
3998regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
3999 struct regcache *regcache)
4000{
4001 struct regset_info *regset;
4002 int saw_general_regs = 0;
4003 int pid;
4004 struct iovec iov;
4005
4006 regset = regsets_info->regsets;
4007
4008 pid = lwpid_of (get_thread_lwp (current_inferior));
4009 while (regset->size >= 0)
4010 {
4011 void *buf, *data;
4012 int nt_type, res;
4013
4014 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4015 {
4016 regset ++;
4017 continue;
4018 }
4019
4020 buf = xmalloc (regset->size);
4021
4022 nt_type = regset->nt_type;
4023 if (nt_type)
4024 {
4025 iov.iov_base = buf;
4026 iov.iov_len = regset->size;
4027 data = (void *) &iov;
4028 }
4029 else
4030 data = buf;
4031
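      /* With a non-zero NT_* note type this is the PTRACE_GETREGSET
	 style of request, which takes the note type as the third
	 argument and a struct iovec describing the buffer as the
	 fourth; the kernel updates iov_len to the amount actually
	 transferred.  */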
4032#ifndef __sparc__
4033 res = ptrace (regset->get_request, pid,
4034 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4035#else
4036 res = ptrace (regset->get_request, pid, data, nt_type);
4037#endif
4038 if (res < 0)
4039 {
4040 if (errno == EIO)
4041 {
4042 /* If we get EIO on a regset, do not try it again for
4043 this process mode. */
4044 disable_regset (regsets_info, regset);
4045 free (buf);
4046 continue;
4047 }
4048 else
4049 {
4050 char s[256];
4051 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4052 pid);
4053 perror (s);
4054 }
4055 }
4056 else if (regset->type == GENERAL_REGS)
4057 saw_general_regs = 1;
4058 regset->store_function (regcache, buf);
4059 regset ++;
4060 free (buf);
4061 }
4062 if (saw_general_regs)
4063 return 0;
4064 else
4065 return 1;
4066}
4067
4068static int
4069regsets_store_inferior_registers (struct regsets_info *regsets_info,
4070 struct regcache *regcache)
4071{
4072 struct regset_info *regset;
4073 int saw_general_regs = 0;
4074 int pid;
4075 struct iovec iov;
4076
4077 regset = regsets_info->regsets;
4078
4079 pid = lwpid_of (get_thread_lwp (current_inferior));
4080 while (regset->size >= 0)
4081 {
4082 void *buf, *data;
4083 int nt_type, res;
4084
4085 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4086 {
4087 regset ++;
4088 continue;
4089 }
4090
4091 buf = xmalloc (regset->size);
4092
4093 /* First fill the buffer with the current register set contents,
4094 in case there are any items in the kernel's regset that are
4095 not in gdbserver's regcache. */
4096
4097 nt_type = regset->nt_type;
4098 if (nt_type)
4099 {
4100 iov.iov_base = buf;
4101 iov.iov_len = regset->size;
4102 data = (void *) &iov;
4103 }
4104 else
4105 data = buf;
4106
4107#ifndef __sparc__
4108 res = ptrace (regset->get_request, pid,
4109 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4110#else
4111 res = ptrace (regset->get_request, pid, data, nt_type);
4112#endif
4113
4114 if (res == 0)
4115 {
4116 /* Then overlay our cached registers on that. */
4117 regset->fill_function (regcache, buf);
4118
4119 /* Only now do we write the register set. */
4120#ifndef __sparc__
4121 res = ptrace (regset->set_request, pid,
4122 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4123#else
4124 res = ptrace (regset->set_request, pid, data, nt_type);
4125#endif
4126 }
4127
4128 if (res < 0)
4129 {
4130 if (errno == EIO)
4131 {
4132 /* If we get EIO on a regset, do not try it again for
4133 this process mode. */
4134 disable_regset (regsets_info, regset);
4135 free (buf);
4136 continue;
4137 }
4138 else if (errno == ESRCH)
4139 {
4140 /* At this point, ESRCH should mean the process is
4141 already gone, in which case we simply ignore attempts
4142 to change its registers. See also the related
4143 comment in linux_resume_one_lwp. */
4144 free (buf);
4145 return 0;
4146 }
4147 else
4148 {
4149 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4150 }
4151 }
4152 else if (regset->type == GENERAL_REGS)
4153 saw_general_regs = 1;
4154 regset ++;
4155 free (buf);
4156 }
4157 if (saw_general_regs)
4158 return 0;
4159 else
4160 return 1;
4161}
4162
4163#else /* !HAVE_LINUX_REGSETS */
4164
4165#define use_linux_regsets 0
4166#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4167#define regsets_store_inferior_registers(regsets_info, regcache) 1
4168
4169#endif
4170
4171/* Return 1 if register REGNO is supported by one of the regset ptrace
4172 calls or 0 if it has to be transferred individually. */
4173
4174static int
4175linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4176{
4177 unsigned char mask = 1 << (regno % 8);
4178 size_t index = regno / 8;
4179
4180 return (use_linux_regsets
4181 && (regs_info->regset_bitmap == NULL
4182 || (regs_info->regset_bitmap[index] & mask) != 0));
4183}
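/* Worked example (a sketch, not from the original source): for
   regno 10, index is 10 / 8 = 1 and mask is 1 << (10 % 8) = 0x04,
   so the test checks bit 2 of the bitmap's second byte.  A NULL
   bitmap means every register is covered by some regset.  */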
4184
4185#ifdef HAVE_LINUX_USRREGS
4186
4187int
4188register_addr (const struct usrregs_info *usrregs, int regnum)
4189{
4190 int addr;
4191
4192 if (regnum < 0 || regnum >= usrregs->num_regs)
4193 error ("Invalid register number %d.", regnum);
4194
4195 addr = usrregs->regmap[regnum];
4196
4197 return addr;
4198}
4199
4200/* Fetch one register. */
4201static void
4202fetch_register (const struct usrregs_info *usrregs,
4203 struct regcache *regcache, int regno)
4204{
4205 CORE_ADDR regaddr;
4206 int i, size;
4207 char *buf;
4208 int pid;
4209
4210 if (regno >= usrregs->num_regs)
4211 return;
4212 if ((*the_low_target.cannot_fetch_register) (regno))
4213 return;
4214
4215 regaddr = register_addr (usrregs, regno);
4216 if (regaddr == -1)
4217 return;
4218
4219 size = ((register_size (regcache->tdesc, regno)
4220 + sizeof (PTRACE_XFER_TYPE) - 1)
4221 & -sizeof (PTRACE_XFER_TYPE));
4222 buf = alloca (size);
4223
4224 pid = lwpid_of (get_thread_lwp (current_inferior));
4225 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4226 {
4227 errno = 0;
4228 *(PTRACE_XFER_TYPE *) (buf + i) =
4229 ptrace (PTRACE_PEEKUSER, pid,
4230 /* Coerce to a uintptr_t first to avoid potential gcc warning
4231 about coercing an 8 byte integer to a 4 byte pointer. */
4232 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4233 regaddr += sizeof (PTRACE_XFER_TYPE);
4234 if (errno != 0)
4235 error ("reading register %d: %s", regno, strerror (errno));
4236 }
4237
4238 if (the_low_target.supply_ptrace_register)
4239 the_low_target.supply_ptrace_register (regcache, regno, buf);
4240 else
4241 supply_register (regcache, regno, buf);
4242}
4243
4244/* Store one register. */
4245static void
4246store_register (const struct usrregs_info *usrregs,
4247 struct regcache *regcache, int regno)
4248{
4249 CORE_ADDR regaddr;
4250 int i, size;
4251 char *buf;
4252 int pid;
4253
4254 if (regno >= usrregs->num_regs)
4255 return;
4256 if ((*the_low_target.cannot_store_register) (regno))
4257 return;
4258
4259 regaddr = register_addr (usrregs, regno);
4260 if (regaddr == -1)
4261 return;
4262
4263 size = ((register_size (regcache->tdesc, regno)
4264 + sizeof (PTRACE_XFER_TYPE) - 1)
4265 & -sizeof (PTRACE_XFER_TYPE));
4266 buf = alloca (size);
4267 memset (buf, 0, size);
4268
4269 if (the_low_target.collect_ptrace_register)
4270 the_low_target.collect_ptrace_register (regcache, regno, buf);
4271 else
4272 collect_register (regcache, regno, buf);
4273
4274 pid = lwpid_of (get_thread_lwp (current_inferior));
4275 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4276 {
4277 errno = 0;
4278 ptrace (PTRACE_POKEUSER, pid,
4279 /* Coerce to a uintptr_t first to avoid potential gcc warning
4280 about coercing an 8 byte integer to a 4 byte pointer. */
4281 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4282 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4283 if (errno != 0)
4284 {
4285 /* At this point, ESRCH should mean the process is
4286 already gone, in which case we simply ignore attempts
4287 to change its registers. See also the related
4288 comment in linux_resume_one_lwp. */
4289 if (errno == ESRCH)
4290 return;
4291
4292 if ((*the_low_target.cannot_store_register) (regno) == 0)
4293 error ("writing register %d: %s", regno, strerror (errno));
4294 }
4295 regaddr += sizeof (PTRACE_XFER_TYPE);
4296 }
4297}
4298
4299/* Fetch all registers, or just one, from the child process.
4300 If REGNO is -1, do this for all registers, skipping any that are
4301 assumed to have been retrieved by regsets_fetch_inferior_registers,
4302 unless ALL is non-zero.
4303 Otherwise, REGNO specifies which register (so we can save time). */
4304static void
4305usr_fetch_inferior_registers (const struct regs_info *regs_info,
4306 struct regcache *regcache, int regno, int all)
4307{
4308 struct usrregs_info *usr = regs_info->usrregs;
4309
4310 if (regno == -1)
4311 {
4312 for (regno = 0; regno < usr->num_regs; regno++)
4313 if (all || !linux_register_in_regsets (regs_info, regno))
4314 fetch_register (usr, regcache, regno);
4315 }
4316 else
4317 fetch_register (usr, regcache, regno);
4318}
4319
4320/* Store our register values back into the inferior.
4321 If REGNO is -1, do this for all registers, skipping any that are
4322 assumed to have been saved by regsets_store_inferior_registers,
4323 unless ALL is non-zero.
4324 Otherwise, REGNO specifies which register (so we can save time). */
4325static void
4326usr_store_inferior_registers (const struct regs_info *regs_info,
4327 struct regcache *regcache, int regno, int all)
4328{
4329 struct usrregs_info *usr = regs_info->usrregs;
4330
4331 if (regno == -1)
4332 {
4333 for (regno = 0; regno < usr->num_regs; regno++)
4334 if (all || !linux_register_in_regsets (regs_info, regno))
4335 store_register (usr, regcache, regno);
4336 }
4337 else
4338 store_register (usr, regcache, regno);
4339}
4340
4341#else /* !HAVE_LINUX_USRREGS */
4342
4343#define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4344#define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4345
4346#endif
4347
4348
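/* Fetch registers (all of them if REGNO is -1) from the inferior into
   REGCACHE, preferring a target-specific fetch_register hook, then
   kernel regsets, then individual PTRACE_PEEKUSER reads for anything
   the regsets did not cover.  */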
4349void
4350linux_fetch_registers (struct regcache *regcache, int regno)
4351{
4352 int use_regsets;
4353 int all = 0;
4354 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4355
4356 if (regno == -1)
4357 {
4358 if (the_low_target.fetch_register != NULL
4359 && regs_info->usrregs != NULL)
4360 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4361 (*the_low_target.fetch_register) (regcache, regno);
4362
4363 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4364 if (regs_info->usrregs != NULL)
4365 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4366 }
4367 else
4368 {
4369 if (the_low_target.fetch_register != NULL
4370 && (*the_low_target.fetch_register) (regcache, regno))
4371 return;
4372
4373 use_regsets = linux_register_in_regsets (regs_info, regno);
4374 if (use_regsets)
4375 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4376 regcache);
4377 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4378 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4379 }
4380}
4381
4382void
4383linux_store_registers (struct regcache *regcache, int regno)
4384{
4385 int use_regsets;
4386 int all = 0;
4387 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4388
4389 if (regno == -1)
4390 {
4391 all = regsets_store_inferior_registers (regs_info->regsets_info,
4392 regcache);
4393 if (regs_info->usrregs != NULL)
4394 usr_store_inferior_registers (regs_info, regcache, regno, all);
4395 }
4396 else
4397 {
4398 use_regsets = linux_register_in_regsets (regs_info, regno);
4399 if (use_regsets)
4400 all = regsets_store_inferior_registers (regs_info->regsets_info,
4401 regcache);
4402 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4403 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4404 }
4405}
4406
4407
4408/* Copy LEN bytes from inferior's memory starting at MEMADDR
4409 to debugger memory starting at MYADDR. Returns 0 on success or the value of errno on failure. */
4410
4411static int
4412linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4413{
4414 int pid = lwpid_of (get_thread_lwp (current_inferior));
4415 register PTRACE_XFER_TYPE *buffer;
4416 register CORE_ADDR addr;
4417 register int count;
4418 char filename[64];
4419 register int i;
4420 int ret;
4421 int fd;
4422
4423 /* Try using /proc. Don't bother for one word. */
4424 if (len >= 3 * sizeof (long))
4425 {
4426 int bytes;
4427
4428 /* We could keep this file open and cache it - possibly one per
4429 thread. That requires some juggling, but is even faster. */
4430 sprintf (filename, "/proc/%d/mem", pid);
4431 fd = open (filename, O_RDONLY | O_LARGEFILE);
4432 if (fd == -1)
4433 goto no_proc;
4434
4435 /* If pread64 is available, use it. It's faster if the kernel
4436 supports it (only one syscall), and it's 64-bit safe even on
4437 32-bit platforms (for instance, SPARC debugging a SPARC64
4438 application). */
4439#ifdef HAVE_PREAD64
4440 bytes = pread64 (fd, myaddr, len, memaddr);
4441#else
4442 bytes = -1;
4443 if (lseek (fd, memaddr, SEEK_SET) != -1)
4444 bytes = read (fd, myaddr, len);
4445#endif
4446
4447 close (fd);
4448 if (bytes == len)
4449 return 0;
4450
4451 /* Some data was read; we'll try to get the rest with ptrace. */
4452 if (bytes > 0)
4453 {
4454 memaddr += bytes;
4455 myaddr += bytes;
4456 len -= bytes;
4457 }
4458 }
4459
4460 no_proc:
4461 /* Round starting address down to longword boundary. */
4462 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4463 /* Round ending address up; get number of longwords that makes. */
4464 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4465 / sizeof (PTRACE_XFER_TYPE));
4466 /* Allocate buffer of that many longwords. */
4467 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
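  /* Worked example (illustrative only): with 8-byte PTRACE_XFER_TYPE,
     memaddr = 0x1003 and len = 5 round down to addr = 0x1000 and up to
     count = 1, since bytes 0x1003..0x1007 fit within the single
     longword at 0x1000.  */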
4468
4469 /* Read all the longwords. */
4470 errno = 0;
4471 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4472 {
4473 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4474 about coercing an 8 byte integer to a 4 byte pointer. */
4475 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4476 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4477 (PTRACE_TYPE_ARG4) 0);
4478 if (errno)
4479 break;
4480 }
4481 ret = errno;
4482
4483 /* Copy appropriate bytes out of the buffer. */
4484 if (i > 0)
4485 {
4486 i *= sizeof (PTRACE_XFER_TYPE);
4487 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4488 memcpy (myaddr,
4489 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4490 i < len ? i : len);
4491 }
4492
4493 return ret;
4494}
4495
4496/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4497 memory at MEMADDR. On failure (cannot write to the inferior)
4498 returns the value of errno. Always succeeds if LEN is zero. */
4499
4500static int
4501linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4502{
4503 register int i;
4504 /* Round starting address down to longword boundary. */
4505 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4506 /* Round ending address up; get number of longwords that makes. */
4507 register int count
4508 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4509 / sizeof (PTRACE_XFER_TYPE);
4510
4511 /* Allocate buffer of that many longwords. */
4512 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4513 alloca (count * sizeof (PTRACE_XFER_TYPE));
4514
4515 int pid = lwpid_of (get_thread_lwp (current_inferior));
4516
4517 if (len == 0)
4518 {
4519 /* Zero length write always succeeds. */
4520 return 0;
4521 }
4522
4523 if (debug_threads)
4524 {
4525 /* Dump up to four bytes. */
4526 unsigned int val = * (unsigned int *) myaddr;
4527 if (len == 1)
4528 val = val & 0xff;
4529 else if (len == 2)
4530 val = val & 0xffff;
4531 else if (len == 3)
4532 val = val & 0xffffff;
4533 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4534 val, (long)memaddr);
4535 }
4536
4537 /* Fill start and end extra bytes of buffer with existing memory data. */
4538
4539 errno = 0;
4540 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4541 about coercing an 8 byte integer to a 4 byte pointer. */
4542 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4543 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4544 (PTRACE_TYPE_ARG4) 0);
4545 if (errno)
4546 return errno;
4547
4548 if (count > 1)
4549 {
4550 errno = 0;
4551 buffer[count - 1]
4552 = ptrace (PTRACE_PEEKTEXT, pid,
4553 /* Coerce to a uintptr_t first to avoid potential gcc warning
4554 about coercing an 8 byte integer to a 4 byte pointer. */
4555 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4556 * sizeof (PTRACE_XFER_TYPE)),
4557 (PTRACE_TYPE_ARG4) 0);
4558 if (errno)
4559 return errno;
4560 }
4561
4562 /* Copy data to be written over corresponding part of buffer. */
4563
4564 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4565 myaddr, len);
4566
4567 /* Write the entire buffer. */
4568
4569 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4570 {
4571 errno = 0;
4572 ptrace (PTRACE_POKETEXT, pid,
4573 /* Coerce to a uintptr_t first to avoid potential gcc warning
4574 about coercing an 8 byte integer to a 4 byte pointer. */
4575 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4576 (PTRACE_TYPE_ARG4) buffer[i]);
4577 if (errno)
4578 return errno;
4579 }
4580
4581 return 0;
4582}
4583
4584static void
4585linux_look_up_symbols (void)
4586{
4587#ifdef USE_THREAD_DB
4588 struct process_info *proc = current_process ();
4589
4590 if (proc->private->thread_db != NULL)
4591 return;
4592
4593 /* If the kernel supports tracing clones, then we don't need to
4594 use the magic thread event breakpoint to learn about
4595 threads. */
4596 thread_db_init (!linux_supports_traceclone ());
4597#endif
4598}
4599
4600static void
4601linux_request_interrupt (void)
4602{
4603 extern unsigned long signal_pid;
4604
4605 if (!ptid_equal (cont_thread, null_ptid)
4606 && !ptid_equal (cont_thread, minus_one_ptid))
4607 {
4608 struct lwp_info *lwp;
4609 int lwpid;
4610
4611 lwp = get_thread_lwp (current_inferior);
4612 lwpid = lwpid_of (lwp);
4613 kill_lwp (lwpid, SIGINT);
4614 }
4615 else
4616 kill_lwp (signal_pid, SIGINT);
4617}
4618
4619/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4620 to debugger memory starting at MYADDR. */
4621
4622static int
4623linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4624{
4625 char filename[PATH_MAX];
4626 int fd, n;
4627 int pid = lwpid_of (get_thread_lwp (current_inferior));
4628
4629 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4630
4631 fd = open (filename, O_RDONLY);
4632 if (fd < 0)
4633 return -1;
4634
4635 if (offset != (CORE_ADDR) 0
4636 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4637 n = -1;
4638 else
4639 n = read (fd, myaddr, len);
4640
4641 close (fd);
4642
4643 return n;
4644}
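/* This routine ultimately backs the qXfer:auxv:read packet, which is
   what GDB's "info auxv" command requests; the bytes come back in the
   raw Elf32_auxv_t/Elf64_auxv_t layout of the inferior.  */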
4645
4646/* These breakpoint and watchpoint related wrapper functions simply
4647 pass on the function call if the target has registered a
4648 corresponding function. */
4649
4650static int
4651linux_insert_point (char type, CORE_ADDR addr, int len)
4652{
4653 if (the_low_target.insert_point != NULL)
4654 return the_low_target.insert_point (type, addr, len);
4655 else
4656 /* Unsupported (see target.h). */
4657 return 1;
4658}
4659
4660static int
4661linux_remove_point (char type, CORE_ADDR addr, int len)
4662{
4663 if (the_low_target.remove_point != NULL)
4664 return the_low_target.remove_point (type, addr, len);
4665 else
4666 /* Unsupported (see target.h). */
4667 return 1;
4668}
4669
4670static int
4671linux_stopped_by_watchpoint (void)
4672{
4673 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4674
4675 return lwp->stopped_by_watchpoint;
4676}
4677
4678static CORE_ADDR
4679linux_stopped_data_address (void)
4680{
4681 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4682
4683 return lwp->stopped_data_address;
4684}
4685
4686#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4687 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4688 && defined(PT_TEXT_END_ADDR)
4689
4690/* This is only used for targets that define PT_TEXT_ADDR,
4691 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4692 the target has different ways of acquiring this information, like
4693 loadmaps. */
4694
4695/* Under uClinux, programs are loaded at non-zero offsets, which we need
4696 to tell gdb about. */
4697
4698static int
4699linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4700{
4701 unsigned long text, text_end, data;
4702 int pid = lwpid_of (get_thread_lwp (current_inferior));
4703
4704 errno = 0;
4705
4706 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4707 (PTRACE_TYPE_ARG4) 0);
4708 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4709 (PTRACE_TYPE_ARG4) 0);
4710 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4711 (PTRACE_TYPE_ARG4) 0);
4712
4713 if (errno == 0)
4714 {
4715 /* Both text and data offsets produced at compile-time (and so
4716 used by gdb) are relative to the beginning of the program,
4717 with the data segment immediately following the text segment.
4718 However, the actual runtime layout in memory may put the data
4719 somewhere else, so when we send gdb a data base-address, we
4720 use the real data base address and subtract the compile-time
4721 data base-address from it (which is just the length of the
4722 text segment). BSS immediately follows data in both
4723 cases. */
4724 *text_p = text;
4725 *data_p = data - (text_end - text);
4726
4727 return 1;
4728 }
4729 return 0;
4730}
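/* Worked example (numbers are invented): if the kernel reports
   text = 0x100000, text_end = 0x108000 and data = 0x200000, then
   *text_p is 0x100000 and *data_p is 0x200000 - 0x8000 = 0x1f8000,
   so that adding the compile-time data offset (the text size) back
   yields the real runtime data address.  */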
4731#endif
4732
4733static int
4734linux_qxfer_osdata (const char *annex,
4735 unsigned char *readbuf, unsigned const char *writebuf,
4736 CORE_ADDR offset, int len)
4737{
4738 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4739}
4740
4741/* Convert a native/host siginfo object, into/from the siginfo in the
4742 layout of the inferiors' architecture. */
4743
4744static void
4745siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4746{
4747 int done = 0;
4748
4749 if (the_low_target.siginfo_fixup != NULL)
4750 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4751
4752 /* If there was no callback, or the callback didn't do anything,
4753 then just do a straight memcpy. */
4754 if (!done)
4755 {
4756 if (direction == 1)
4757 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4758 else
4759 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4760 }
4761}
4762
4763static int
4764linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4765 unsigned const char *writebuf, CORE_ADDR offset, int len)
4766{
4767 int pid;
4768 siginfo_t siginfo;
4769 char inf_siginfo[sizeof (siginfo_t)];
4770
4771 if (current_inferior == NULL)
4772 return -1;
4773
4774 pid = lwpid_of (get_thread_lwp (current_inferior));
4775
4776 if (debug_threads)
4777 fprintf (stderr, "%s siginfo for lwp %d.\n",
4778 readbuf != NULL ? "Reading" : "Writing",
4779 pid);
4780
4781 if (offset >= sizeof (siginfo))
4782 return -1;
4783
4784 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4785 return -1;
4786
4787 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4788 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4789 inferior with a 64-bit GDBSERVER should look the same as debugging it
4790 with a 32-bit GDBSERVER, we need to convert it. */
4791 siginfo_fixup (&siginfo, inf_siginfo, 0);
4792
4793 if (offset + len > sizeof (siginfo))
4794 len = sizeof (siginfo) - offset;
4795
4796 if (readbuf != NULL)
4797 memcpy (readbuf, inf_siginfo + offset, len);
4798 else
4799 {
4800 memcpy (inf_siginfo + offset, writebuf, len);
4801
4802 /* Convert back to ptrace layout before flushing it out. */
4803 siginfo_fixup (&siginfo, inf_siginfo, 1);
4804
4805 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4806 return -1;
4807 }
4808
4809 return len;
4810}
4811
4812/* SIGCHLD handler that serves two purposes: in non-stop/async mode
4813 it lets us notice when children change state, and it also acts as
4814 the handler for the sigsuspend in my_waitpid. */
4815
4816static void
4817sigchld_handler (int signo)
4818{
4819 int old_errno = errno;
4820
4821 if (debug_threads)
4822 {
4823 do
4824 {
4825 /* fprintf is not async-signal-safe, so call write
4826 directly. */
4827 if (write (2, "sigchld_handler\n",
4828 sizeof ("sigchld_handler\n") - 1) < 0)
4829 break; /* just ignore */
4830 } while (0);
4831 }
4832
4833 if (target_is_async_p ())
4834 async_file_mark (); /* trigger a linux_wait */
4835
4836 errno = old_errno;
4837}
4838
4839static int
4840linux_supports_non_stop (void)
4841{
4842 return 1;
4843}
4844
4845static int
4846linux_async (int enable)
4847{
4848 int previous = (linux_event_pipe[0] != -1);
4849
4850 if (debug_threads)
4851 fprintf (stderr, "linux_async (%d), previous=%d\n",
4852 enable, previous);
4853
4854 if (previous != enable)
4855 {
4856 sigset_t mask;
4857 sigemptyset (&mask);
4858 sigaddset (&mask, SIGCHLD);
4859
4860 sigprocmask (SIG_BLOCK, &mask, NULL);
4861
4862 if (enable)
4863 {
4864 if (pipe (linux_event_pipe) == -1)
4865 fatal ("creating event pipe failed.");
4866
4867 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4868 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4869
4870 /* Register the event loop handler. */
4871 add_file_handler (linux_event_pipe[0],
4872 handle_target_event, NULL);
4873
4874 /* Always trigger a linux_wait. */
4875 async_file_mark ();
4876 }
4877 else
4878 {
4879 delete_file_handler (linux_event_pipe[0]);
4880
4881 close (linux_event_pipe[0]);
4882 close (linux_event_pipe[1]);
4883 linux_event_pipe[0] = -1;
4884 linux_event_pipe[1] = -1;
4885 }
4886
4887 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4888 }
4889
4890 return previous;
4891}
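/* This is the classic self-pipe pattern: sigchld_handler calls
   async_file_mark, which writes a byte into linux_event_pipe[1],
   while the event loop watches linux_event_pipe[0], so a SIGCHLD
   arriving at any time wakes the event loop safely.  */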
4892
4893static int
4894linux_start_non_stop (int nonstop)
4895{
4896 /* Register or unregister from event-loop accordingly. */
4897 linux_async (nonstop);
4898 return 0;
4899}
4900
4901static int
4902linux_supports_multi_process (void)
4903{
4904 return 1;
4905}
4906
4907static int
4908linux_supports_disable_randomization (void)
4909{
4910#ifdef HAVE_PERSONALITY
4911 return 1;
4912#else
4913 return 0;
4914#endif
4915}
4916
4917static int
4918linux_supports_agent (void)
4919{
4920 return 1;
4921}
4922
4923static int
4924linux_supports_range_stepping (void)
4925{
4926 if (*the_low_target.supports_range_stepping == NULL)
4927 return 0;
4928
4929 return (*the_low_target.supports_range_stepping) ();
4930}
4931
4932/* Enumerate spufs IDs for process PID. */
4933static int
4934spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4935{
4936 int pos = 0;
4937 int written = 0;
4938 char path[128];
4939 DIR *dir;
4940 struct dirent *entry;
4941
4942 sprintf (path, "/proc/%ld/fd", pid);
4943 dir = opendir (path);
4944 if (!dir)
4945 return -1;
4946
4947 rewinddir (dir);
4948 while ((entry = readdir (dir)) != NULL)
4949 {
4950 struct stat st;
4951 struct statfs stfs;
4952 int fd;
4953
4954 fd = atoi (entry->d_name);
4955 if (!fd)
4956 continue;
4957
4958 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4959 if (stat (path, &st) != 0)
4960 continue;
4961 if (!S_ISDIR (st.st_mode))
4962 continue;
4963
4964 if (statfs (path, &stfs) != 0)
4965 continue;
4966 if (stfs.f_type != SPUFS_MAGIC)
4967 continue;
4968
4969 if (pos >= offset && pos + 4 <= offset + len)
4970 {
4971 *(unsigned int *)(buf + pos - offset) = fd;
4972 written += 4;
4973 }
4974 pos += 4;
4975 }
4976
4977 closedir (dir);
4978 return written;
4979}
4980
4981/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4982 object type, using the /proc file system. */
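/* The annex names a file relative to an spufs context directory; e.g.
   (hypothetical values) an annex of "7/regs" with pid 1234 maps to
   "/proc/1234/fd/7/regs".  An empty annex instead enumerates the SPU
   context IDs themselves.  */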
4983static int
4984linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4985 unsigned const char *writebuf,
4986 CORE_ADDR offset, int len)
4987{
4988 long pid = lwpid_of (get_thread_lwp (current_inferior));
4989 char buf[128];
4990 int fd = 0;
4991 int ret = 0;
4992
4993 if (!writebuf && !readbuf)
4994 return -1;
4995
4996 if (!*annex)
4997 {
4998 if (!readbuf)
4999 return -1;
5000 else
5001 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5002 }
5003
5004 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5005 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5006 if (fd <= 0)
5007 return -1;
5008
5009 if (offset != 0
5010 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5011 {
5012 close (fd);
5013 return 0;
5014 }
5015
5016 if (writebuf)
5017 ret = write (fd, writebuf, (size_t) len);
5018 else
5019 ret = read (fd, readbuf, (size_t) len);
5020
5021 close (fd);
5022 return ret;
5023}
5024
5025#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5026struct target_loadseg
5027{
5028 /* Core address to which the segment is mapped. */
5029 Elf32_Addr addr;
5030 /* VMA recorded in the program header. */
5031 Elf32_Addr p_vaddr;
5032 /* Size of this segment in memory. */
5033 Elf32_Word p_memsz;
5034};
5035
5036# if defined PT_GETDSBT
5037struct target_loadmap
5038{
5039 /* Protocol version number, must be zero. */
5040 Elf32_Word version;
5041 /* Pointer to the DSBT table, its size, and the DSBT index. */
5042 unsigned *dsbt_table;
5043 unsigned dsbt_size, dsbt_index;
5044 /* Number of segments in this map. */
5045 Elf32_Word nsegs;
5046 /* The actual memory map. */
5047 struct target_loadseg segs[/*nsegs*/];
5048};
5049# define LINUX_LOADMAP PT_GETDSBT
5050# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5051# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5052# else
5053struct target_loadmap
5054{
5055 /* Protocol version number, must be zero. */
5056 Elf32_Half version;
5057 /* Number of segments in this map. */
5058 Elf32_Half nsegs;
5059 /* The actual memory map. */
5060 struct target_loadseg segs[/*nsegs*/];
5061};
5062# define LINUX_LOADMAP PTRACE_GETFDPIC
5063# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5064# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5065# endif
5066
5067static int
5068linux_read_loadmap (const char *annex, CORE_ADDR offset,
5069 unsigned char *myaddr, unsigned int len)
5070{
5071 int pid = lwpid_of (get_thread_lwp (current_inferior));
5072 int addr = -1;
5073 struct target_loadmap *data = NULL;
5074 unsigned int actual_length, copy_length;
5075
5076 if (strcmp (annex, "exec") == 0)
5077 addr = (int) LINUX_LOADMAP_EXEC;
5078 else if (strcmp (annex, "interp") == 0)
5079 addr = (int) LINUX_LOADMAP_INTERP;
5080 else
5081 return -1;
5082
5083 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5084 return -1;
5085
5086 if (data == NULL)
5087 return -1;
5088
5089 actual_length = sizeof (struct target_loadmap)
5090 + sizeof (struct target_loadseg) * data->nsegs;
5091
5092 if (offset < 0 || offset > actual_length)
5093 return -1;
5094
5095 copy_length = actual_length - offset < len ? actual_length - offset : len;
5096 memcpy (myaddr, (char *) data + offset, copy_length);
5097 return copy_length;
5098}
5099#else
5100# define linux_read_loadmap NULL
5101#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5102
5103static void
5104linux_process_qsupported (const char *query)
5105{
5106 if (the_low_target.process_qsupported != NULL)
5107 the_low_target.process_qsupported (query);
5108}
5109
5110static int
5111linux_supports_tracepoints (void)
5112{
5113 if (*the_low_target.supports_tracepoints == NULL)
5114 return 0;
5115
5116 return (*the_low_target.supports_tracepoints) ();
5117}
5118
5119static CORE_ADDR
5120linux_read_pc (struct regcache *regcache)
5121{
5122 if (the_low_target.get_pc == NULL)
5123 return 0;
5124
5125 return (*the_low_target.get_pc) (regcache);
5126}
5127
5128static void
5129linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5130{
5131 gdb_assert (the_low_target.set_pc != NULL);
5132
5133 (*the_low_target.set_pc) (regcache, pc);
5134}
5135
5136static int
5137linux_thread_stopped (struct thread_info *thread)
5138{
5139 return get_thread_lwp (thread)->stopped;
5140}
5141
5142/* This exposes stop-all-threads functionality to other modules. */
5143
5144static void
5145linux_pause_all (int freeze)
5146{
5147 stop_all_lwps (freeze, NULL);
5148}
5149
5150/* This exposes unstop-all-threads functionality to other gdbserver
5151 modules. */
5152
5153static void
5154linux_unpause_all (int unfreeze)
5155{
5156 unstop_all_lwps (unfreeze, NULL);
5157}
5158
5159static int
5160linux_prepare_to_access_memory (void)
5161{
5162 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5163 running LWP. */
5164 if (non_stop)
5165 linux_pause_all (1);
5166 return 0;
5167}
5168
5169static void
5170linux_done_accessing_memory (void)
5171{
5172 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5173 running LWP. */
5174 if (non_stop)
5175 linux_unpause_all (1);
5176}
5177
5178static int
5179linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5180 CORE_ADDR collector,
5181 CORE_ADDR lockaddr,
5182 ULONGEST orig_size,
5183 CORE_ADDR *jump_entry,
5184 CORE_ADDR *trampoline,
5185 ULONGEST *trampoline_size,
5186 unsigned char *jjump_pad_insn,
5187 ULONGEST *jjump_pad_insn_size,
5188 CORE_ADDR *adjusted_insn_addr,
5189 CORE_ADDR *adjusted_insn_addr_end,
5190 char *err)
5191{
5192 return (*the_low_target.install_fast_tracepoint_jump_pad)
5193 (tpoint, tpaddr, collector, lockaddr, orig_size,
5194 jump_entry, trampoline, trampoline_size,
5195 jjump_pad_insn, jjump_pad_insn_size,
5196 adjusted_insn_addr, adjusted_insn_addr_end,
5197 err);
5198}
5199
5200static struct emit_ops *
5201linux_emit_ops (void)
5202{
5203 if (the_low_target.emit_ops != NULL)
5204 return (*the_low_target.emit_ops) ();
5205 else
5206 return NULL;
5207}
5208
5209static int
5210linux_get_min_fast_tracepoint_insn_len (void)
5211{
5212 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5213}
5214
5215/* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5216
5217static int
5218get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5219 CORE_ADDR *phdr_memaddr, int *num_phdr)
5220{
5221 char filename[PATH_MAX];
5222 int fd;
5223 const int auxv_size = is_elf64
5224 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5225 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5226
5227 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5228
5229 fd = open (filename, O_RDONLY);
5230 if (fd < 0)
5231 return 1;
5232
5233 *phdr_memaddr = 0;
5234 *num_phdr = 0;
5235 while (read (fd, buf, auxv_size) == auxv_size
5236 && (*phdr_memaddr == 0 || *num_phdr == 0))
5237 {
5238 if (is_elf64)
5239 {
5240 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5241
5242 switch (aux->a_type)
5243 {
5244 case AT_PHDR:
5245 *phdr_memaddr = aux->a_un.a_val;
5246 break;
5247 case AT_PHNUM:
5248 *num_phdr = aux->a_un.a_val;
5249 break;
5250 }
5251 }
5252 else
5253 {
5254 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5255
5256 switch (aux->a_type)
5257 {
5258 case AT_PHDR:
5259 *phdr_memaddr = aux->a_un.a_val;
5260 break;
5261 case AT_PHNUM:
5262 *num_phdr = aux->a_un.a_val;
5263 break;
5264 }
5265 }
5266 }
5267
5268 close (fd);
5269
5270 if (*phdr_memaddr == 0 || *num_phdr == 0)
5271 {
5272 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5273 "phdr_memaddr = %ld, phdr_num = %d",
5274 (long) *phdr_memaddr, *num_phdr);
5275 return 2;
5276 }
5277
5278 return 0;
5279}
5280
5281/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5282
5283static CORE_ADDR
5284get_dynamic (const int pid, const int is_elf64)
5285{
5286 CORE_ADDR phdr_memaddr, relocation;
5287 int num_phdr, i;
5288 unsigned char *phdr_buf;
5289 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5290
5291 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5292 return 0;
5293
5294 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5295 phdr_buf = alloca (num_phdr * phdr_size);
5296
5297 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5298 return 0;
5299
5300 /* Compute relocation: it is expected to be 0 for "regular" executables,
5301 non-zero for PIE ones. */
5302 relocation = -1;
5303 for (i = 0; relocation == -1 && i < num_phdr; i++)
5304 if (is_elf64)
5305 {
5306 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5307
5308 if (p->p_type == PT_PHDR)
5309 relocation = phdr_memaddr - p->p_vaddr;
5310 }
5311 else
5312 {
5313 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5314
5315 if (p->p_type == PT_PHDR)
5316 relocation = phdr_memaddr - p->p_vaddr;
5317 }
5318
5319 if (relocation == -1)
5320 {
5321 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5322 all real-world executables, including PIE executables, always have
5323 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5324 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5325 provides DT_DEBUG anyway (fpc binaries are statically linked).
5326
5327 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
5328
5329 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5330
5331 return 0;
5332 }
5333
5334 for (i = 0; i < num_phdr; i++)
5335 {
5336 if (is_elf64)
5337 {
5338 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5339
5340 if (p->p_type == PT_DYNAMIC)
5341 return p->p_vaddr + relocation;
5342 }
5343 else
5344 {
5345 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5346
5347 if (p->p_type == PT_DYNAMIC)
5348 return p->p_vaddr + relocation;
5349 }
5350 }
5351
5352 return 0;
5353}
5354
5355/* Return &_r_debug in the inferior, or -1 if not present. Return value
5356 can be 0 if the inferior does not yet have the library list initialized.
5357 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5358 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5359
5360static CORE_ADDR
5361get_r_debug (const int pid, const int is_elf64)
5362{
5363 CORE_ADDR dynamic_memaddr;
5364 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5365 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5366 CORE_ADDR map = -1;
5367
5368 dynamic_memaddr = get_dynamic (pid, is_elf64);
5369 if (dynamic_memaddr == 0)
5370 return map;
5371
5372 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5373 {
5374 if (is_elf64)
5375 {
5376 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5377#ifdef DT_MIPS_RLD_MAP
5378 union
5379 {
5380 Elf64_Xword map;
5381 unsigned char buf[sizeof (Elf64_Xword)];
5382 }
5383 rld_map;
5384
5385 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5386 {
5387 if (linux_read_memory (dyn->d_un.d_val,
5388 rld_map.buf, sizeof (rld_map.buf)) == 0)
5389 return rld_map.map;
5390 else
5391 break;
5392 }
5393#endif /* DT_MIPS_RLD_MAP */
5394
5395 if (dyn->d_tag == DT_DEBUG && map == -1)
5396 map = dyn->d_un.d_val;
5397
5398 if (dyn->d_tag == DT_NULL)
5399 break;
5400 }
5401 else
5402 {
5403 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5404#ifdef DT_MIPS_RLD_MAP
5405 union
5406 {
5407 Elf32_Word map;
5408 unsigned char buf[sizeof (Elf32_Word)];
5409 }
5410 rld_map;
5411
5412 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5413 {
5414 if (linux_read_memory (dyn->d_un.d_val,
5415 rld_map.buf, sizeof (rld_map.buf)) == 0)
5416 return rld_map.map;
5417 else
5418 break;
5419 }
5420#endif /* DT_MIPS_RLD_MAP */
5421
5422 if (dyn->d_tag == DT_DEBUG && map == -1)
5423 map = dyn->d_un.d_val;
5424
5425 if (dyn->d_tag == DT_NULL)
5426 break;
5427 }
5428
5429 dynamic_memaddr += dyn_size;
5430 }
5431
5432 return map;
5433}
5434
5435/* Read one pointer from MEMADDR in the inferior. */
5436
5437static int
5438read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5439{
5440 int ret;
5441
5442 /* Go through a union so this works on either big or little endian
5443 hosts, when the inferior's pointer size is smaller than the size
5444 of CORE_ADDR. It is assumed that the inferior's endianness is the
5445 same as the superior's. */
5446 union
5447 {
5448 CORE_ADDR core_addr;
5449 unsigned int ui;
5450 unsigned char uc;
5451 } addr;
5452
5453 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5454 if (ret == 0)
5455 {
5456 if (ptr_size == sizeof (CORE_ADDR))
5457 *ptr = addr.core_addr;
5458 else if (ptr_size == sizeof (unsigned int))
5459 *ptr = addr.ui;
5460 else
5461 gdb_assert_not_reached ("unhandled pointer size");
5462 }
5463 return ret;
5464}
5465
5466struct link_map_offsets
5467 {
5468 /* Offset and size of r_debug.r_version. */
5469 int r_version_offset;
5470
5471 /* Offset and size of r_debug.r_map. */
5472 int r_map_offset;
5473
5474 /* Offset to l_addr field in struct link_map. */
5475 int l_addr_offset;
5476
5477 /* Offset to l_name field in struct link_map. */
5478 int l_name_offset;
5479
5480 /* Offset to l_ld field in struct link_map. */
5481 int l_ld_offset;
5482
5483 /* Offset to l_next field in struct link_map. */
5484 int l_next_offset;
5485
5486 /* Offset to l_prev field in struct link_map. */
5487 int l_prev_offset;
5488 };
5489
5490/* Construct qXfer:libraries-svr4:read reply. */
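/* A generated reply document looks roughly like this (all addresses
   are invented for illustration; the real output is not
   pretty-printed):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib/libc.so.6" lm="0x7ffff7fd9700"
	      l_addr="0x7ffff7bc4000" l_ld="0x7ffff7f63ba0"/>
   </library-list-svr4>
*/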
5491
5492static int
5493linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5494 unsigned const char *writebuf,
5495 CORE_ADDR offset, int len)
5496{
5497 char *document;
5498 unsigned document_len;
5499 struct process_info_private *const priv = current_process ()->private;
5500 char filename[PATH_MAX];
5501 int pid, is_elf64;
5502
5503 static const struct link_map_offsets lmo_32bit_offsets =
5504 {
5505 0, /* r_version offset. */
5506 4, /* r_debug.r_map offset. */
5507 0, /* l_addr offset in link_map. */
5508 4, /* l_name offset in link_map. */
5509 8, /* l_ld offset in link_map. */
5510 12, /* l_next offset in link_map. */
5511 16 /* l_prev offset in link_map. */
5512 };
5513
5514 static const struct link_map_offsets lmo_64bit_offsets =
5515 {
5516 0, /* r_version offset. */
5517 8, /* r_debug.r_map offset. */
5518 0, /* l_addr offset in link_map. */
5519 8, /* l_name offset in link_map. */
5520 16, /* l_ld offset in link_map. */
5521 24, /* l_next offset in link_map. */
5522 32 /* l_prev offset in link_map. */
5523 };
5524 const struct link_map_offsets *lmo;
5525 unsigned int machine;
5526 int ptr_size;
5527 CORE_ADDR lm_addr = 0, lm_prev = 0;
5528 int allocated = 1024;
5529 char *p;
5530 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5531 int header_done = 0;
5532
5533 if (writebuf != NULL)
5534 return -2;
5535 if (readbuf == NULL)
5536 return -1;
5537
5538 pid = lwpid_of (get_thread_lwp (current_inferior));
5539 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5540 is_elf64 = elf_64_file_p (filename, &machine);
5541 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5542 ptr_size = is_elf64 ? 8 : 4;
5543
5544 while (annex[0] != '\0')
5545 {
5546 const char *sep;
5547 CORE_ADDR *addrp;
5548 int len;
5549
5550 sep = strchr (annex, '=');
5551 if (sep == NULL)
5552 break;
5553
5554 len = sep - annex;
5555 if (len == 5 && strncmp (annex, "start", 5) == 0)
5556 addrp = &lm_addr;
5557 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5558 addrp = &lm_prev;
5559 else
5560 {
5561 annex = strchr (sep, ';');
5562 if (annex == NULL)
5563 break;
5564 annex++;
5565 continue;
5566 }
5567
5568 annex = decode_address_to_semicolon (addrp, sep + 1);
5569 }
5570
5571 if (lm_addr == 0)
5572 {
5573 int r_version = 0;
5574
5575 if (priv->r_debug == 0)
5576 priv->r_debug = get_r_debug (pid, is_elf64);
5577
5578 /* We failed to find DT_DEBUG. This situation will not change
5579 for this inferior, so do not retry. Report it to GDB as
5580 E01; see solib-svr4.c on the GDB side for the reasons. */
5581 if (priv->r_debug == (CORE_ADDR) -1)
5582 return -1;
5583
5584 if (priv->r_debug != 0)
5585 {
5586 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5587 (unsigned char *) &r_version,
5588 sizeof (r_version)) != 0
5589 || r_version != 1)
5590 {
5591 warning ("unexpected r_debug version %d", r_version);
5592 }
5593 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5594 &lm_addr, ptr_size) != 0)
5595 {
5596 warning ("unable to read r_map from 0x%lx",
5597 (long) priv->r_debug + lmo->r_map_offset);
5598 }
5599 }
5600 }
5601
5602 document = xmalloc (allocated);
5603 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5604 p = document + strlen (document);
5605
5606 while (lm_addr
5607 && read_one_ptr (lm_addr + lmo->l_name_offset,
5608 &l_name, ptr_size) == 0
5609 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5610 &l_addr, ptr_size) == 0
5611 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5612 &l_ld, ptr_size) == 0
5613 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5614 &l_prev, ptr_size) == 0
5615 && read_one_ptr (lm_addr + lmo->l_next_offset,
5616 &l_next, ptr_size) == 0)
5617 {
5618 unsigned char libname[PATH_MAX];
5619
5620 if (lm_prev != l_prev)
5621 {
5622 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5623 (long) lm_prev, (long) l_prev);
5624 break;
5625 }
5626
5627 /* Ignore the first entry even if it has a valid name, as the first
5628 entry corresponds to the main executable. The first entry should not
5629 be skipped if the dynamic loader was loaded late by a static executable
5630 (see the solib-svr4.c parameter ignore_first). But in that case the
5631 main executable does not have PT_DYNAMIC present, and this function
5632 has already exited above due to a failed get_r_debug. */
5633 if (lm_prev == 0)
5634 {
5635 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5636 p = p + strlen (p);
5637 }
5638 else
5639 {
5640 /* Not checking for error because reading may stop before
5641 we've got PATH_MAX worth of characters. */
5642 libname[0] = '\0';
5643 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5644 libname[sizeof (libname) - 1] = '\0';
5645 if (libname[0] != '\0')
5646 {
5647 /* 6x the size for xml_escape_text below. */
5648 size_t len = 6 * strlen ((char *) libname);
5649 char *name;
5650
5651 if (!header_done)
5652 {
5653 /* Terminate `<library-list-svr4'. */
5654 *p++ = '>';
5655 header_done = 1;
5656 }
5657
5658 while (allocated < p - document + len + 200)
5659 {
5660 /* Expand to guarantee sufficient storage. */
5661 uintptr_t document_len = p - document;
5662
5663 document = xrealloc (document, 2 * allocated);
5664 allocated *= 2;
5665 p = document + document_len;
5666 }
5667
5668 name = xml_escape_text ((char *) libname);
5669 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5670 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5671 name, (unsigned long) lm_addr,
5672 (unsigned long) l_addr, (unsigned long) l_ld);
5673 free (name);
5674 }
5675 }
5676
5677 lm_prev = lm_addr;
5678 lm_addr = l_next;
5679 }
5680
5681 if (!header_done)
5682 {
5683 /* Empty list; terminate `<library-list-svr4'. */
5684 strcpy (p, "/>");
5685 }
5686 else
5687 strcpy (p, "</library-list-svr4>");
5688
5689 document_len = strlen (document);
5690 if (offset < document_len)
5691 document_len -= offset;
5692 else
5693 document_len = 0;
5694 if (len > document_len)
5695 len = document_len;
5696
5697 memcpy (readbuf, document + offset, len);
5698 xfree (document);
5699
5700 return len;
5701}
5702
5703#ifdef HAVE_LINUX_BTRACE
5704
5705/* Enable branch tracing. */
5706
5707static struct btrace_target_info *
5708linux_low_enable_btrace (ptid_t ptid)
5709{
5710 struct btrace_target_info *tinfo;
5711
5712 tinfo = linux_enable_btrace (ptid);
5713
5714 if (tinfo != NULL)
5715 {
5716 struct thread_info *thread = find_thread_ptid (ptid);
5717 struct regcache *regcache = get_thread_regcache (thread, 0);
5718
5719 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5720 }
5721
5722 return tinfo;
5723}
5724
5725/* Read branch trace data as btrace xml document. */
5726
5727static void
5728linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5729 int type)
5730{
5731 VEC (btrace_block_s) *btrace;
5732 struct btrace_block *block;
5733 int i;
5734
5735 btrace = linux_read_btrace (tinfo, type);
5736
5737 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5738 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5739
5740 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5741 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5742 paddress (block->begin), paddress (block->end));
5743
5744 buffer_grow_str (buffer, "</btrace>\n");
5745
5746 VEC_free (btrace_block_s, btrace);
5747}
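/* The resulting document has this shape (block addresses invented):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400500" end="0x400520"/>
   </btrace>
*/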
5748#endif /* HAVE_LINUX_BTRACE */
5749
5750static struct target_ops linux_target_ops = {
5751 linux_create_inferior,
5752 linux_attach,
5753 linux_kill,
5754 linux_detach,
5755 linux_mourn,
5756 linux_join,
5757 linux_thread_alive,
5758 linux_resume,
5759 linux_wait,
5760 linux_fetch_registers,
5761 linux_store_registers,
5762 linux_prepare_to_access_memory,
5763 linux_done_accessing_memory,
5764 linux_read_memory,
5765 linux_write_memory,
5766 linux_look_up_symbols,
5767 linux_request_interrupt,
5768 linux_read_auxv,
5769 linux_insert_point,
5770 linux_remove_point,
5771 linux_stopped_by_watchpoint,
5772 linux_stopped_data_address,
5773#if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5774 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5775 && defined(PT_TEXT_END_ADDR)
5776 linux_read_offsets,
5777#else
5778 NULL,
5779#endif
5780#ifdef USE_THREAD_DB
5781 thread_db_get_tls_address,
5782#else
5783 NULL,
5784#endif
5785 linux_qxfer_spu,
5786 hostio_last_error_from_errno,
5787 linux_qxfer_osdata,
5788 linux_xfer_siginfo,
5789 linux_supports_non_stop,
5790 linux_async,
5791 linux_start_non_stop,
5792 linux_supports_multi_process,
5793#ifdef USE_THREAD_DB
5794 thread_db_handle_monitor_command,
5795#else
5796 NULL,
5797#endif
5798 linux_common_core_of_thread,
5799 linux_read_loadmap,
5800 linux_process_qsupported,
5801 linux_supports_tracepoints,
5802 linux_read_pc,
5803 linux_write_pc,
5804 linux_thread_stopped,
5805 NULL,
5806 linux_pause_all,
5807 linux_unpause_all,
5808 linux_cancel_breakpoints,
5809 linux_stabilize_threads,
5810 linux_install_fast_tracepoint_jump_pad,
5811 linux_emit_ops,
5812 linux_supports_disable_randomization,
5813 linux_get_min_fast_tracepoint_insn_len,
5814 linux_qxfer_libraries_svr4,
5815 linux_supports_agent,
5816#ifdef HAVE_LINUX_BTRACE
5817 linux_supports_btrace,
5818 linux_low_enable_btrace,
5819 linux_disable_btrace,
5820 linux_low_read_btrace,
5821#else
5822 NULL,
5823 NULL,
5824 NULL,
5825 NULL,
5826#endif
5827 linux_supports_range_stepping,
5828};
5829
5830static void
5831linux_init_signals (void)
5832{
5833 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5834 to find what the cancel signal actually is. */
5835#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5836 signal (__SIGRTMIN+1, SIG_IGN);
5837#endif
5838}
5839
5840#ifdef HAVE_LINUX_REGSETS
5841void
5842initialize_regsets_info (struct regsets_info *info)
5843{
5844 for (info->num_regsets = 0;
5845 info->regsets[info->num_regsets].size >= 0;
5846 info->num_regsets++)
5847 ;
5848}
5849#endif
5850
5851void
5852initialize_low (void)
5853{
5854 struct sigaction sigchld_action;
5855 memset (&sigchld_action, 0, sizeof (sigchld_action));
5856 set_target_ops (&linux_target_ops);
5857 set_breakpoint_data (the_low_target.breakpoint,
5858 the_low_target.breakpoint_len);
5859 linux_init_signals ();
5860 linux_ptrace_init_warnings ();
5861
5862 sigchld_action.sa_handler = sigchld_handler;
5863 sigemptyset (&sigchld_action.sa_mask);
5864 sigchld_action.sa_flags = SA_RESTART;
5865 sigaction (SIGCHLD, &sigchld_action, NULL);
5866
5867 initialize_low_arch ();
5868}