/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include "gdb_wait.h"
25 #include <stdio.h>
26 #include <sys/param.h>
27 #include <sys/ptrace.h>
28 #include "linux-ptrace.h"
29 #include "linux-procfs.h"
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include "gdb_stat.h"
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #ifndef ELFMAG0
47 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51 #include <elf.h>
52 #endif
53
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

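/* As a worked example of the fallback above: W_STOPCODE (SIGTRAP) is
   (5 << 8) | 0x7f == 0x57f, a wait status for which WIFSTOPPED is
   true and WSTOPSIG recovers SIGTRAP.  dequeue_one_deferred_signal
   below relies on exactly this round-trip to rebuild a wait status
   from a queued signal number.  */
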
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

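/* A note on the distinction: glibc reserves the first couple of
   kernel realtime signals for its own threading implementation, so
   the SIGRTMIN visible to applications is typically 34 or 35, while
   the kernel's first realtime signal - the __SIGRTMIN above - is
   32.  */
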
#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
/* PTRACE_TEXT_ADDR and friends.  */
#include <asm/ptrace.h>
#define HAS_NOMMU
#endif
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
         that, though, since it does not work when using 32-bit
         definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do
         that, though, since it does not work when using 32-bit
         definitions on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

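/* A minimal sketch of how these fallback types get consumed, assuming
   a hypothetical FD already open on /proc/<pid>/auxv of a 64-bit
   inferior (the names FD and USE are illustrative only):

     Elf64_auxv_t auxv;

     while (read (fd, &auxv, sizeof (auxv)) == sizeof (auxv)
            && auxv.a_type != AT_NULL)
       if (auxv.a_type == AT_PHDR)
         use (auxv.a_un.a_val);

   Keeping a_un down to a plain integer is what allows the same
   parsing code to walk a 32-bit inferior's auxv from a 64-bit
   gdbserver and vice versa.  */
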
/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

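/* Typical usage of the two helpers above, as in this file: the
   waitpid layer stashes a stop it cannot yet attribute with

     add_to_pid_list (&stopped_pids, ret, *wstatp);

   and handle_extended_wait later claims it with

     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ...wait for the new child explicitly...

   pull_pid_from_list returns 1 and fills *STATUSP only if PID is
   found; otherwise it returns 0 and leaves the list untouched.  */
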
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};
#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is ELF but not
   64-bit, and -1 if it is not ELF at all.  The machine is stored in
   *MACHINE (EM_NONE if HEADER is not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

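/* For reference, the magic tested above is simply the four bytes
   { 0x7f, 'E', 'L', 'F' } at the start of the file; EI_CLASS is the
   fifth byte of e_ident and distinguishes ELFCLASS32 from
   ELFCLASS64.  */
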
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

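/* In other words: my_waitpid never passes __WALL down to the kernel.
   When asked for it, it emulates __WALL by alternating plain and
   __WCLONE waits in WNOHANG mode, sleeping in sigsuspend between
   rounds, until one flavor reaps PID.  Callers can therefore pass
   __WALL unconditionally, as linux_wait_for_lwp below does.  */
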
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads != NOT_STOPPING_THREADS)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads != NOT_STOPPING_THREADS)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

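/* Concretely, on i386 the memory breakpoint is the one-byte INT3
   opcode (0xcc) and decr_pc_after_break is 1.  If a breakpoint is
   planted at, say, 0x8048000 and the inferior is continued, the
   kernel reports the SIGTRAP with $eip == 0x8048001, and get_stop_pc
   rewinds it to 0x8048000 so that breakpoint_at-style lookups on
   lwp->stop_pc match the planted breakpoint.  */
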
static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      struct buffer buffer;

      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
             lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        fprintf (stderr,
                 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace (PTRACE_KILL) (or
     ptrace (PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace (CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
             "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
             "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* Don't dereference LWP here; it is NULL.  */
      if (debug_threads)
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
                 pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          linux_kill_one_lwp (lwp);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s hasn't stopped: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had stopped with extended "
                 "status: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, but it is in nopass state\n",
                 target_pid_to_str (ptid_of (lp)),
                 gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, "
                 "but we don't know if we should pass it.  Default to not.\n",
                 target_pid_to_str (ptid_of (lp)),
                 gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s has pending signal %s: delivering it.\n",
                 target_pid_to_str (ptid_of (lp)),
                 gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

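/* get_detach_signal's result is consumed by linux_detach_one_lwp
   below, which hands it straight to PTRACE_DETACH; a return of 0
   there means "detach without delivering any signal".  */
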
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Sending SIGCONT to %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0,
              (PTRACE_ARG4_TYPE) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (lwp)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Don't look at STATUS if waitpid failed; it is unset then.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

1817 static int
1818 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1819 {
1820 struct lwp_info *event_child, *requested_child;
1821 ptid_t wait_ptid;
1822
1823 event_child = NULL;
1824 requested_child = NULL;
1825
1826 /* Check for a lwp with a pending status. */
1827
1828 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1829 {
1830 event_child = (struct lwp_info *)
1831 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1832 if (debug_threads && event_child)
1833 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1834 }
1835 else
1836 {
1837 requested_child = find_lwp_pid (ptid);
1838
1839 if (stopping_threads == NOT_STOPPING_THREADS
1840 && requested_child->status_pending_p
1841 && requested_child->collecting_fast_tracepoint)
1842 {
1843 enqueue_one_deferred_signal (requested_child,
1844 &requested_child->status_pending);
1845 requested_child->status_pending_p = 0;
1846 requested_child->status_pending = 0;
1847 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1848 }
1849
1850 if (requested_child->suspended
1851 && requested_child->status_pending_p)
1852 fatal ("requesting an event out of a suspended child?");
1853
1854 if (requested_child->status_pending_p)
1855 event_child = requested_child;
1856 }
1857
1858 if (event_child != NULL)
1859 {
1860 if (debug_threads)
1861 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1862 lwpid_of (event_child), event_child->status_pending);
1863 *wstat = event_child->status_pending;
1864 event_child->status_pending_p = 0;
1865 event_child->status_pending = 0;
1866 current_inferior = get_lwp_thread (event_child);
1867 return lwpid_of (event_child);
1868 }
1869
1870 if (ptid_is_pid (ptid))
1871 {
1872 /* A request to wait for a specific tgid. This is not possible
1873 with waitpid, so instead, we wait for any child, and leave
1874 children we're not interested in right now with a pending
1875 status to report later. */
1876 wait_ptid = minus_one_ptid;
1877 }
1878 else
1879 wait_ptid = ptid;
1880
1881 /* We only enter this loop if no process has a pending wait status. Thus
1882 any action taken in response to a wait status inside this loop is
1883 responding as soon as we detect the status, not after any pending
1884 events. */
1885 while (1)
1886 {
1887 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1888
1889 if ((options & WNOHANG) && event_child == NULL)
1890 {
1891 if (debug_threads)
1892 fprintf (stderr, "WNOHANG set, no event found\n");
1893 return 0;
1894 }
1895
1896 if (event_child == NULL)
1897 error ("event from unknown child");
1898
1899 if (ptid_is_pid (ptid)
1900 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1901 {
1902 if (! WIFSTOPPED (*wstat))
1903 mark_lwp_dead (event_child, *wstat);
1904 else
1905 {
1906 event_child->status_pending_p = 1;
1907 event_child->status_pending = *wstat;
1908 }
1909 continue;
1910 }
1911
1912 current_inferior = get_lwp_thread (event_child);
1913
1914 /* Check for thread exit. */
1915 if (! WIFSTOPPED (*wstat))
1916 {
1917 if (debug_threads)
1918 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1919
1920 /* If the last thread is exiting, just return. */
1921 if (last_thread_of_process_p (current_inferior))
1922 {
1923 if (debug_threads)
1924 fprintf (stderr, "LWP %ld is last lwp of process\n",
1925 lwpid_of (event_child));
1926 return lwpid_of (event_child);
1927 }
1928
1929 if (!non_stop)
1930 {
1931 current_inferior = (struct thread_info *) all_threads.head;
1932 if (debug_threads)
1933 fprintf (stderr, "Current inferior is now %ld\n",
1934 lwpid_of (get_thread_lwp (current_inferior)));
1935 }
1936 else
1937 {
1938 current_inferior = NULL;
1939 if (debug_threads)
1940 fprintf (stderr, "Current inferior is now <NULL>\n");
1941 }
1942
1943 /* If we were waiting for this particular child to do something...
1944 well, it did something. */
1945 if (requested_child != NULL)
1946 {
1947 int lwpid = lwpid_of (event_child);
1948
1949 /* Cancel the step-over operation --- the thread that
1950 started it is gone. */
1951 if (finish_step_over (event_child))
1952 unstop_all_lwps (1, event_child);
1953 delete_lwp (event_child);
1954 return lwpid;
1955 }
1956
1957 delete_lwp (event_child);
1958
1959 /* Wait for a more interesting event. */
1960 continue;
1961 }
1962
1963 if (event_child->must_set_ptrace_flags)
1964 {
1965 linux_enable_event_reporting (lwpid_of (event_child));
1966 event_child->must_set_ptrace_flags = 0;
1967 }
1968
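      /* A nonzero value in the upper half of the wait status is the
	 extended ptrace event code (PTRACE_EVENT_CLONE and friends),
	 reported because event reporting was enabled with
	 PTRACE_SETOPTIONS, e.g. just above.  */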
1969 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1970 && *wstat >> 16 != 0)
1971 {
1972 handle_extended_wait (event_child, *wstat);
1973 continue;
1974 }
1975
1976 if (WIFSTOPPED (*wstat)
1977 && WSTOPSIG (*wstat) == SIGSTOP
1978 && event_child->stop_expected)
1979 {
1980 int should_stop;
1981
1982 if (debug_threads)
1983 fprintf (stderr, "Expected stop.\n");
1984 event_child->stop_expected = 0;
1985
1986 should_stop = (current_inferior->last_resume_kind == resume_stop
1987 || stopping_threads != NOT_STOPPING_THREADS);
1988
1989 if (!should_stop)
1990 {
1991 linux_resume_one_lwp (event_child,
1992 event_child->stepping, 0, NULL);
1993 continue;
1994 }
1995 }
1996
1997 return lwpid_of (event_child);
1998 }
1999
2000 /* NOTREACHED */
2001 return 0;
2002 }
2003
2004 /* Count the LWPs that have had events.  */
2005
2006 static int
2007 count_events_callback (struct inferior_list_entry *entry, void *data)
2008 {
2009 struct lwp_info *lp = (struct lwp_info *) entry;
2010 struct thread_info *thread = get_lwp_thread (lp);
2011 int *count = data;
2012
2013 gdb_assert (count != NULL);
2014
2015 /* Count only resumed LWPs that have a SIGTRAP event pending that
2016 should be reported to GDB. */
2017 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2018 && thread->last_resume_kind != resume_stop
2019 && lp->status_pending_p
2020 && WIFSTOPPED (lp->status_pending)
2021 && WSTOPSIG (lp->status_pending) == SIGTRAP
2022 && !breakpoint_inserted_here (lp->stop_pc))
2023 (*count)++;
2024
2025 return 0;
2026 }
2027
2028 /* Select the LWP (if any) that is currently being single-stepped. */
2029
2030 static int
2031 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2032 {
2033 struct lwp_info *lp = (struct lwp_info *) entry;
2034 struct thread_info *thread = get_lwp_thread (lp);
2035
2036 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2037 && thread->last_resume_kind == resume_step
2038 && lp->status_pending_p)
2039 return 1;
2040 else
2041 return 0;
2042 }
2043
2044 /* Select the Nth LWP that has had a SIGTRAP event that should be
2045 reported to GDB. */
2046
2047 static int
2048 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2049 {
2050 struct lwp_info *lp = (struct lwp_info *) entry;
2051 struct thread_info *thread = get_lwp_thread (lp);
2052 int *selector = data;
2053
2054 gdb_assert (selector != NULL);
2055
2056 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2057 if (thread->last_resume_kind != resume_stop
2058 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2059 && lp->status_pending_p
2060 && WIFSTOPPED (lp->status_pending)
2061 && WSTOPSIG (lp->status_pending) == SIGTRAP
2062 && !breakpoint_inserted_here (lp->stop_pc))
2063 if ((*selector)-- == 0)
2064 return 1;
2065
2066 return 0;
2067 }
2068
2069 static int
2070 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2071 {
2072 struct lwp_info *lp = (struct lwp_info *) entry;
2073 struct thread_info *thread = get_lwp_thread (lp);
2074 struct lwp_info *event_lp = data;
2075
2076 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2077 if (lp == event_lp)
2078 return 0;
2079
2080   /* If an LWP other than the LWP that we're reporting an event for has
2081 hit a GDB breakpoint (as opposed to some random trap signal),
2082 then just arrange for it to hit it again later. We don't keep
2083 the SIGTRAP status and don't forward the SIGTRAP signal to the
2084 LWP. We will handle the current event, eventually we will resume
2085 all LWPs, and this one will get its breakpoint trap again.
2086
2087 If we do not do this, then we run the risk that the user will
2088 delete or disable the breakpoint, but the LWP will have already
2089 tripped on it. */
2090
2091 if (thread->last_resume_kind != resume_stop
2092 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2093 && lp->status_pending_p
2094 && WIFSTOPPED (lp->status_pending)
2095 && WSTOPSIG (lp->status_pending) == SIGTRAP
2096 && !lp->stepping
2097 && !lp->stopped_by_watchpoint
2098 && cancel_breakpoint (lp))
2099 /* Throw away the SIGTRAP. */
2100 lp->status_pending_p = 0;
2101
2102 return 0;
2103 }
2104
2105 static void
2106 linux_cancel_breakpoints (void)
2107 {
2108 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2109 }
2110
2111 /* Select one LWP out of those that have events pending. */
2112
2113 static void
2114 select_event_lwp (struct lwp_info **orig_lp)
2115 {
2116 int num_events = 0;
2117 int random_selector;
2118 struct lwp_info *event_lp;
2119
2120 /* Give preference to any LWP that is being single-stepped. */
2121 event_lp
2122 = (struct lwp_info *) find_inferior (&all_lwps,
2123 select_singlestep_lwp_callback, NULL);
2124 if (event_lp != NULL)
2125 {
2126 if (debug_threads)
2127 fprintf (stderr,
2128 "SEL: Select single-step %s\n",
2129 target_pid_to_str (ptid_of (event_lp)));
2130 }
2131 else
2132 {
2133 /* No single-stepping LWP. Select one at random, out of those
2134 which have had SIGTRAP events. */
2135
2136 /* First see how many SIGTRAP events we have. */
2137 find_inferior (&all_lwps, count_events_callback, &num_events);
2138
2139 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
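      /* Scaling by (RAND_MAX + 1.0) is the usual C idiom for drawing
	 a uniform integer in [0, num_events): e.g. with num_events ==
	 4 it maps rand () evenly onto {0, 1, 2, 3}, avoiding the
	 modulo bias of rand () % num_events.  */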
2140 random_selector = (int)
2141 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2142
2143 if (debug_threads && num_events > 1)
2144 fprintf (stderr,
2145 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2146 num_events, random_selector);
2147
2148 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2149 select_event_lwp_callback,
2150 &random_selector);
2151 }
2152
2153 if (event_lp != NULL)
2154 {
2155 /* Switch the event LWP. */
2156 *orig_lp = event_lp;
2157 }
2158 }
2159
2160 /* Decrement the suspend count of an LWP. */
2161
2162 static int
2163 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2164 {
2165 struct lwp_info *lwp = (struct lwp_info *) entry;
2166
2167 /* Ignore EXCEPT. */
2168 if (lwp == except)
2169 return 0;
2170
2171 lwp->suspended--;
2172
2173 gdb_assert (lwp->suspended >= 0);
2174 return 0;
2175 }
2176
2177 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2178    non-NULL.  */
2179
2180 static void
2181 unsuspend_all_lwps (struct lwp_info *except)
2182 {
2183 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2184 }
2185
2186 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2187 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2188 void *data);
2189 static int lwp_running (struct inferior_list_entry *entry, void *data);
2190 static ptid_t linux_wait_1 (ptid_t ptid,
2191 struct target_waitstatus *ourstatus,
2192 int target_options);
2193
2194 /* Stabilize threads (move out of jump pads).
2195
2196 If a thread is midway collecting a fast tracepoint, we need to
2197 finish the collection and move it out of the jump pad before
2198 reporting the signal.
2199
2200 This avoids recursion while collecting (when a signal arrives
2201 midway, and the signal handler itself collects), which would trash
2202 the trace buffer. In case the user set a breakpoint in a signal
2203 handler, this avoids the backtrace showing the jump pad, etc..
2204 Most importantly, there are certain things we can't do safely if
2205    threads are stopped in a jump pad (or in its callees).  For
2206 example:
2207
2208    - starting a new trace run.  A thread still collecting the
2209    previous run could trash the trace buffer when resumed.  The trace
2210    buffer control structures would have been reset but the thread
2211    would have no way to tell.  The thread could even be midway through
2212    memcpy'ing to the buffer, in which case, when resumed, it would
2213    clobber the trace buffer that had been set up for the new run.
2214
2215 - we can't rewrite/reuse the jump pads for new tracepoints
2216    safely.  Say you do tstart while a thread is stopped midway through
2217    collecting.  When the thread is later resumed, it finishes the
2218    collection, and returns to the jump pad, to execute the original
2219    instruction that was under the tracepoint jump at the time the
2220    older run had been started.  If the jump pad had since been
2221    rewritten for something else in the new run, the thread would now
2222    execute the wrong or random instructions.  */
2223
2224 static void
2225 linux_stabilize_threads (void)
2226 {
2227 struct thread_info *save_inferior;
2228 struct lwp_info *lwp_stuck;
2229
2230 lwp_stuck
2231 = (struct lwp_info *) find_inferior (&all_lwps,
2232 stuck_in_jump_pad_callback, NULL);
2233 if (lwp_stuck != NULL)
2234 {
2235 if (debug_threads)
2236 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2237 lwpid_of (lwp_stuck));
2238 return;
2239 }
2240
2241 save_inferior = current_inferior;
2242
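  /* While this flag is set, linux_resume_one_lwp only lets LWPs that
     are finishing a fast tracepoint collect run (see the assertion
     there), and linux_wait_1 reports the resulting stops with
     GDB_SIGNAL_0.  */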
2243 stabilizing_threads = 1;
2244
2245 /* Kick 'em all. */
2246 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2247
2248 /* Loop until all are stopped out of the jump pads. */
2249 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2250 {
2251 struct target_waitstatus ourstatus;
2252 struct lwp_info *lwp;
2253 int wstat;
2254
2255       /* Note that we go through the full wait event loop.  While
2256 moving threads out of jump pad, we need to be able to step
2257 over internal breakpoints and such. */
2258 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2259
2260 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2261 {
2262 lwp = get_thread_lwp (current_inferior);
2263
2264 	  /* Lock it: bump the suspend count so nothing resumes this LWP behind our back.  */
2265 lwp->suspended++;
2266
2267 if (ourstatus.value.sig != GDB_SIGNAL_0
2268 || current_inferior->last_resume_kind == resume_stop)
2269 {
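	      /* Synthesize a wait status as if the LWP had just
		 stopped with this signal; with SIGSTOP == 19 (as on
		 x86), W_STOPCODE (SIGSTOP) is (19 << 8) | 0x7f ==
		 0x137f.  */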
2270 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2271 enqueue_one_deferred_signal (lwp, &wstat);
2272 }
2273 }
2274 }
2275
2276 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2277
2278 stabilizing_threads = 0;
2279
2280 current_inferior = save_inferior;
2281
2282 if (debug_threads)
2283 {
2284 lwp_stuck
2285 = (struct lwp_info *) find_inferior (&all_lwps,
2286 stuck_in_jump_pad_callback, NULL);
2287 if (lwp_stuck != NULL)
2288 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2289 lwpid_of (lwp_stuck));
2290 }
2291 }
2292
2293 /* Wait for the process and return its status.  */
2294
2295 static ptid_t
2296 linux_wait_1 (ptid_t ptid,
2297 struct target_waitstatus *ourstatus, int target_options)
2298 {
2299 int w;
2300 struct lwp_info *event_child;
2301 int options;
2302 int pid;
2303 int step_over_finished;
2304 int bp_explains_trap;
2305 int maybe_internal_trap;
2306 int report_to_gdb;
2307 int trace_event;
2308
2309 /* Translate generic target options into linux options. */
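  /* __WALL asks waitpid to report events from all children, both
     clone "threads" and plain fork children, regardless of their
     exit signal.  */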
2310 options = __WALL;
2311 if (target_options & TARGET_WNOHANG)
2312 options |= WNOHANG;
2313
2314 retry:
2315 bp_explains_trap = 0;
2316 trace_event = 0;
2317 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2318
2319 /* If we were only supposed to resume one thread, only wait for
2320 that thread - if it's still alive. If it died, however - which
2321 can happen if we're coming from the thread death case below -
2322 then we need to make sure we restart the other threads. We could
2323 pick a thread at random or restart all; restarting all is less
2324 arbitrary. */
2325 if (!non_stop
2326 && !ptid_equal (cont_thread, null_ptid)
2327 && !ptid_equal (cont_thread, minus_one_ptid))
2328 {
2329 struct thread_info *thread;
2330
2331 thread = (struct thread_info *) find_inferior_id (&all_threads,
2332 cont_thread);
2333
2334 /* No stepping, no signal - unless one is pending already, of course. */
2335 if (thread == NULL)
2336 {
2337 struct thread_resume resume_info;
2338 resume_info.thread = minus_one_ptid;
2339 resume_info.kind = resume_continue;
2340 resume_info.sig = 0;
2341 linux_resume (&resume_info, 1);
2342 }
2343 else
2344 ptid = cont_thread;
2345 }
2346
2347 if (ptid_equal (step_over_bkpt, null_ptid))
2348 pid = linux_wait_for_event (ptid, &w, options);
2349 else
2350 {
2351 if (debug_threads)
2352 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2353 target_pid_to_str (step_over_bkpt));
2354 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2355 }
2356
2357 if (pid == 0) /* only if TARGET_WNOHANG */
2358 return null_ptid;
2359
2360 event_child = get_thread_lwp (current_inferior);
2361
2362 /* If we are waiting for a particular child, and it exited,
2363 linux_wait_for_event will return its exit status. Similarly if
2364 the last child exited. If this is not the last child, however,
2365 do not report it as exited until there is a 'thread exited' response
2366 available in the remote protocol. Instead, just wait for another event.
2367 This should be safe, because if the thread crashed we will already
2368 have reported the termination signal to GDB; that should stop any
2369 in-progress stepping operations, etc.
2370
2371 Report the exit status of the last thread to exit. This matches
2372 LinuxThreads' behavior. */
2373
2374 if (last_thread_of_process_p (current_inferior))
2375 {
2376 if (WIFEXITED (w) || WIFSIGNALED (w))
2377 {
2378 if (WIFEXITED (w))
2379 {
2380 ourstatus->kind = TARGET_WAITKIND_EXITED;
2381 ourstatus->value.integer = WEXITSTATUS (w);
2382
2383 if (debug_threads)
2384 fprintf (stderr,
2385 "\nChild exited with retcode = %x \n",
2386 WEXITSTATUS (w));
2387 }
2388 else
2389 {
2390 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2391 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2392
2393 if (debug_threads)
2394 fprintf (stderr,
2395 "\nChild terminated with signal = %x \n",
2396 WTERMSIG (w));
2397
2398 }
2399
2400 return ptid_of (event_child);
2401 }
2402 }
2403 else
2404 {
2405 if (!WIFSTOPPED (w))
2406 goto retry;
2407 }
2408
2409 /* If this event was not handled before, and is not a SIGTRAP, we
2410 report it. SIGILL and SIGSEGV are also treated as traps in case
2411 a breakpoint is inserted at the current PC. If this target does
2412 not support internal breakpoints at all, we also report the
2413 SIGTRAP without further processing; it's of no concern to us. */
2414 maybe_internal_trap
2415 = (supports_breakpoints ()
2416 && (WSTOPSIG (w) == SIGTRAP
2417 || ((WSTOPSIG (w) == SIGILL
2418 || WSTOPSIG (w) == SIGSEGV)
2419 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2420
2421 if (maybe_internal_trap)
2422 {
2423 /* Handle anything that requires bookkeeping before deciding to
2424 report the event or continue waiting. */
2425
2426 /* First check if we can explain the SIGTRAP with an internal
2427 breakpoint, or if we should possibly report the event to GDB.
2428 Do this before anything that may remove or insert a
2429 breakpoint. */
2430 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2431
2432 /* We have a SIGTRAP, possibly a step-over dance has just
2433 finished. If so, tweak the state machine accordingly,
2434 reinsert breakpoints and delete any reinsert (software
2435 single-step) breakpoints. */
2436 step_over_finished = finish_step_over (event_child);
2437
2438 /* Now invoke the callbacks of any internal breakpoints there. */
2439 check_breakpoints (event_child->stop_pc);
2440
2441 /* Handle tracepoint data collecting. This may overflow the
2442 trace buffer, and cause a tracing stop, removing
2443 breakpoints. */
2444 trace_event = handle_tracepoints (event_child);
2445
2446 if (bp_explains_trap)
2447 {
2448 /* If we stepped or ran into an internal breakpoint, we've
2449 already handled it. So next time we resume (from this
2450 PC), we should step over it. */
2451 if (debug_threads)
2452 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2453
2454 if (breakpoint_here (event_child->stop_pc))
2455 event_child->need_step_over = 1;
2456 }
2457 }
2458 else
2459 {
2460 /* We have some other signal, possibly a step-over dance was in
2461 progress, and it should be cancelled too. */
2462 step_over_finished = finish_step_over (event_child);
2463 }
2464
2465 /* We have all the data we need. Either report the event to GDB, or
2466 resume threads and keep waiting for more. */
2467
2468 /* If we're collecting a fast tracepoint, finish the collection and
2469 move out of the jump pad before delivering a signal. See
2470 linux_stabilize_threads. */
2471
2472 if (WIFSTOPPED (w)
2473 && WSTOPSIG (w) != SIGTRAP
2474 && supports_fast_tracepoints ()
2475 && agent_loaded_p ())
2476 {
2477 if (debug_threads)
2478 fprintf (stderr,
2479 "Got signal %d for LWP %ld. Check if we need "
2480 "to defer or adjust it.\n",
2481 WSTOPSIG (w), lwpid_of (event_child));
2482
2483 /* Allow debugging the jump pad itself. */
2484 if (current_inferior->last_resume_kind != resume_step
2485 && maybe_move_out_of_jump_pad (event_child, &w))
2486 {
2487 enqueue_one_deferred_signal (event_child, &w);
2488
2489 if (debug_threads)
2490 fprintf (stderr,
2491 "Signal %d for LWP %ld deferred (in jump pad)\n",
2492 WSTOPSIG (w), lwpid_of (event_child));
2493
2494 linux_resume_one_lwp (event_child, 0, 0, NULL);
2495 goto retry;
2496 }
2497 }
2498
2499 if (event_child->collecting_fast_tracepoint)
2500 {
2501 if (debug_threads)
2502 fprintf (stderr, "\
2503 LWP %ld was trying to move out of the jump pad (%d). \
2504 Check if we're already there.\n",
2505 lwpid_of (event_child),
2506 event_child->collecting_fast_tracepoint);
2507
2508 trace_event = 1;
2509
2510 event_child->collecting_fast_tracepoint
2511 = linux_fast_tracepoint_collecting (event_child, NULL);
2512
2513 if (event_child->collecting_fast_tracepoint != 1)
2514 {
2515 /* No longer need this breakpoint. */
2516 if (event_child->exit_jump_pad_bkpt != NULL)
2517 {
2518 if (debug_threads)
2519 fprintf (stderr,
2520 "No longer need exit-jump-pad bkpt; removing it."
2521 "stopping all threads momentarily.\n");
2522
2523 /* Other running threads could hit this breakpoint.
2524 We don't handle moribund locations like GDB does,
2525 instead we always pause all threads when removing
2526 breakpoints, so that any step-over or
2527 decr_pc_after_break adjustment is always taken
2528 care of while the breakpoint is still
2529 inserted. */
2530 stop_all_lwps (1, event_child);
2531 cancel_breakpoints ();
2532
2533 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2534 event_child->exit_jump_pad_bkpt = NULL;
2535
2536 unstop_all_lwps (1, event_child);
2537
2538 gdb_assert (event_child->suspended >= 0);
2539 }
2540 }
2541
2542 if (event_child->collecting_fast_tracepoint == 0)
2543 {
2544 if (debug_threads)
2545 fprintf (stderr,
2546 "fast tracepoint finished "
2547 "collecting successfully.\n");
2548
2549 /* We may have a deferred signal to report. */
2550 if (dequeue_one_deferred_signal (event_child, &w))
2551 {
2552 if (debug_threads)
2553 fprintf (stderr, "dequeued one signal.\n");
2554 }
2555 else
2556 {
2557 if (debug_threads)
2558 fprintf (stderr, "no deferred signals.\n");
2559
2560 if (stabilizing_threads)
2561 {
2562 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2563 ourstatus->value.sig = GDB_SIGNAL_0;
2564 return ptid_of (event_child);
2565 }
2566 }
2567 }
2568 }
2569
2570 /* Check whether GDB would be interested in this event. */
2571
2572 /* If GDB is not interested in this signal, don't stop other
2573 threads, and don't report it to GDB. Just resume the inferior
2574 right away. We do this for threading-related signals as well as
2575 any that GDB specifically requested we ignore. But never ignore
2576 SIGSTOP if we sent it ourselves, and do not ignore signals when
2577 stepping - they may require special handling to skip the signal
2578 handler. */
2579 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2580 thread library? */
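  /* __SIGRTMIN and __SIGRTMIN + 1 are the first two kernel realtime
     signals; glibc's NPTL reserves them for internal use (thread
     cancellation and setxid synchronization), so with thread_db in
     use they are of no interest to GDB.  */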
2581 if (WIFSTOPPED (w)
2582 && current_inferior->last_resume_kind != resume_step
2583 && (
2584 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2585 (current_process ()->private->thread_db != NULL
2586 && (WSTOPSIG (w) == __SIGRTMIN
2587 || WSTOPSIG (w) == __SIGRTMIN + 1))
2588 ||
2589 #endif
2590 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2591 && !(WSTOPSIG (w) == SIGSTOP
2592 && current_inferior->last_resume_kind == resume_stop))))
2593 {
2594 siginfo_t info, *info_p;
2595
2596 if (debug_threads)
2597 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2598 WSTOPSIG (w), lwpid_of (event_child));
2599
2600 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2601 info_p = &info;
2602 else
2603 info_p = NULL;
2604 linux_resume_one_lwp (event_child, event_child->stepping,
2605 WSTOPSIG (w), info_p);
2606 goto retry;
2607 }
2608
2609 /* If GDB wanted this thread to single step, we always want to
2610 report the SIGTRAP, and let GDB handle it. Watchpoints should
2611 always be reported. So should signals we can't explain. A
2612    SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2613    may not support Z0 breakpoints.  If we do, we'll be able to handle
2614 GDB breakpoints on top of internal breakpoints, by handling the
2615 internal breakpoint and still reporting the event to GDB. If we
2616 don't, we're out of luck, GDB won't see the breakpoint hit. */
2617 report_to_gdb = (!maybe_internal_trap
2618 || current_inferior->last_resume_kind == resume_step
2619 || event_child->stopped_by_watchpoint
2620 || (!step_over_finished
2621 && !bp_explains_trap && !trace_event)
2622 || (gdb_breakpoint_here (event_child->stop_pc)
2623 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2624 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2625
2626 run_breakpoint_commands (event_child->stop_pc);
2627
2628 /* We found no reason GDB would want us to stop. We either hit one
2629 of our own breakpoints, or finished an internal step GDB
2630 shouldn't know about. */
2631 if (!report_to_gdb)
2632 {
2633 if (debug_threads)
2634 {
2635 if (bp_explains_trap)
2636 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2637 if (step_over_finished)
2638 fprintf (stderr, "Step-over finished.\n");
2639 if (trace_event)
2640 fprintf (stderr, "Tracepoint event.\n");
2641 }
2642
2643 /* We're not reporting this breakpoint to GDB, so apply the
2644 decr_pc_after_break adjustment to the inferior's regcache
2645 ourselves. */
2646
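      /* E.g. on x86 a breakpoint trap leaves the reported PC one byte
	 past the int3; event_child->stop_pc was already adjusted back
	 when the stop was processed, so writing it here re-points the
	 thread at the breakpoint address.  */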
2647 if (the_low_target.set_pc != NULL)
2648 {
2649 struct regcache *regcache
2650 = get_thread_regcache (get_lwp_thread (event_child), 1);
2651 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2652 }
2653
2654 /* We may have finished stepping over a breakpoint. If so,
2655 we've stopped and suspended all LWPs momentarily except the
2656 stepping one. This is where we resume them all again. We're
2657 going to keep waiting, so use proceed, which handles stepping
2658 over the next breakpoint. */
2659 if (debug_threads)
2660 fprintf (stderr, "proceeding all threads.\n");
2661
2662 if (step_over_finished)
2663 unsuspend_all_lwps (event_child);
2664
2665 proceed_all_lwps ();
2666 goto retry;
2667 }
2668
2669 if (debug_threads)
2670 {
2671 if (current_inferior->last_resume_kind == resume_step)
2672 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2673 if (event_child->stopped_by_watchpoint)
2674 fprintf (stderr, "Stopped by watchpoint.\n");
2675 if (gdb_breakpoint_here (event_child->stop_pc))
2676 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2677       fprintf (stderr,
2678 	       "Hit a non-gdbserver trap event.\n");
2679 }
2680
2681 /* Alright, we're going to report a stop. */
2682
2683 if (!non_stop && !stabilizing_threads)
2684 {
2685 /* In all-stop, stop all threads. */
2686 stop_all_lwps (0, NULL);
2687
2688 /* If we're not waiting for a specific LWP, choose an event LWP
2689 from among those that have had events. Giving equal priority
2690 to all LWPs that have had events helps prevent
2691 starvation. */
2692 if (ptid_equal (ptid, minus_one_ptid))
2693 {
2694 event_child->status_pending_p = 1;
2695 event_child->status_pending = w;
2696
2697 select_event_lwp (&event_child);
2698
2699 event_child->status_pending_p = 0;
2700 w = event_child->status_pending;
2701 }
2702
2703 /* Now that we've selected our final event LWP, cancel any
2704 breakpoints in other LWPs that have hit a GDB breakpoint.
2705 See the comment in cancel_breakpoints_callback to find out
2706 why. */
2707 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2708
2709       /* If we were doing a step-over, all other threads but the stepping one
2710 had been paused in start_step_over, with their suspend counts
2711 incremented. We don't want to do a full unstop/unpause, because we're
2712 in all-stop mode (so we want threads stopped), but we still need to
2713 unsuspend the other threads, to decrement their `suspended' count
2714 back. */
2715 if (step_over_finished)
2716 unsuspend_all_lwps (event_child);
2717
2718 /* Stabilize threads (move out of jump pads). */
2719 stabilize_threads ();
2720 }
2721 else
2722 {
2723 /* If we just finished a step-over, then all threads had been
2724 momentarily paused. In all-stop, that's fine, we want
2725 threads stopped by now anyway. In non-stop, we need to
2726 re-resume threads that GDB wanted to be running. */
2727 if (step_over_finished)
2728 unstop_all_lwps (1, event_child);
2729 }
2730
2731 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2732
2733 if (current_inferior->last_resume_kind == resume_stop
2734 && WSTOPSIG (w) == SIGSTOP)
2735 {
2736       /* GDB requested this thread to stop with vCont;t, and it
2737 	 stopped cleanly, so report it as GDB_SIGNAL_0.  The use of
2738 	 SIGSTOP is an implementation detail.  */
2739 ourstatus->value.sig = GDB_SIGNAL_0;
2740 }
2741 else if (current_inferior->last_resume_kind == resume_stop
2742 && WSTOPSIG (w) != SIGSTOP)
2743 {
2744       /* GDB requested this thread to stop with vCont;t, but it
2745 	 stopped for some other reason.  */
2746 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2747 }
2748 else
2749 {
2750 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2751 }
2752
2753 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2754
2755 if (debug_threads)
2756 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2757 target_pid_to_str (ptid_of (event_child)),
2758 ourstatus->kind,
2759 ourstatus->value.sig);
2760
2761 return ptid_of (event_child);
2762 }
2763
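/* gdbserver's async mode uses the self-pipe trick: the SIGCHLD
   handler marks linux_event_pipe, and the event loop monitors the
   pipe's read end, turning signal arrival into a file event.  */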
2764 /* Get rid of any pending event in the pipe. */
2765 static void
2766 async_file_flush (void)
2767 {
2768 int ret;
2769 char buf;
2770
2771 do
2772 ret = read (linux_event_pipe[0], &buf, 1);
2773 while (ret >= 0 || (ret == -1 && errno == EINTR));
2774 }
2775
2776 /* Put something in the pipe, so the event loop wakes up. */
2777 static void
2778 async_file_mark (void)
2779 {
2780 int ret;
2781
2782 async_file_flush ();
2783
2784 do
2785 ret = write (linux_event_pipe[1], "+", 1);
2786 while (ret == 0 || (ret == -1 && errno == EINTR));
2787
2788 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2789 be awakened anyway. */
2790 }
2791
2792 static ptid_t
2793 linux_wait (ptid_t ptid,
2794 struct target_waitstatus *ourstatus, int target_options)
2795 {
2796 ptid_t event_ptid;
2797
2798 if (debug_threads)
2799 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2800
2801 /* Flush the async file first. */
2802 if (target_is_async_p ())
2803 async_file_flush ();
2804
2805 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2806
2807 /* If at least one stop was reported, there may be more. A single
2808 SIGCHLD can signal more than one child stop. */
2809 if (target_is_async_p ()
2810 && (target_options & TARGET_WNOHANG) != 0
2811 && !ptid_equal (event_ptid, null_ptid))
2812 async_file_mark ();
2813
2814 return event_ptid;
2815 }
2816
2817 /* Send a signal to an LWP. */
2818
2819 static int
2820 kill_lwp (unsigned long lwpid, int signo)
2821 {
2822 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2823 fails, then we are not using nptl threads and we should be using kill. */
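/* With NPTL, all threads of a process share one PID, so kill ()
   would deliver a process-directed signal that any thread might
   take; tkill directs the signal at the one LWP we mean to stop.  */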
2824
2825 #ifdef __NR_tkill
2826 {
2827 static int tkill_failed;
2828
2829 if (!tkill_failed)
2830 {
2831 int ret;
2832
2833 errno = 0;
2834 ret = syscall (__NR_tkill, lwpid, signo);
2835 if (errno != ENOSYS)
2836 return ret;
2837 tkill_failed = 1;
2838 }
2839 }
2840 #endif
2841
2842 return kill (lwpid, signo);
2843 }
2844
2845 void
2846 linux_stop_lwp (struct lwp_info *lwp)
2847 {
2848 send_sigstop (lwp);
2849 }
2850
2851 static void
2852 send_sigstop (struct lwp_info *lwp)
2853 {
2854 int pid;
2855
2856 pid = lwpid_of (lwp);
2857
2858 /* If we already have a pending stop signal for this process, don't
2859 send another. */
2860 if (lwp->stop_expected)
2861 {
2862 if (debug_threads)
2863 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2864
2865 return;
2866 }
2867
2868 if (debug_threads)
2869 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2870
2871 lwp->stop_expected = 1;
2872 kill_lwp (pid, SIGSTOP);
2873 }
2874
2875 static int
2876 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2877 {
2878 struct lwp_info *lwp = (struct lwp_info *) entry;
2879
2880 /* Ignore EXCEPT. */
2881 if (lwp == except)
2882 return 0;
2883
2884 if (lwp->stopped)
2885 return 0;
2886
2887 send_sigstop (lwp);
2888 return 0;
2889 }
2890
2891 /* Increment the suspend count of an LWP, and stop it, if not stopped
2892 yet. */
2893 static int
2894 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2895 void *except)
2896 {
2897 struct lwp_info *lwp = (struct lwp_info *) entry;
2898
2899 /* Ignore EXCEPT. */
2900 if (lwp == except)
2901 return 0;
2902
2903 lwp->suspended++;
2904
2905 return send_sigstop_callback (entry, except);
2906 }
2907
2908 static void
2909 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2910 {
2911 /* It's dead, really. */
2912 lwp->dead = 1;
2913
2914 /* Store the exit status for later. */
2915 lwp->status_pending_p = 1;
2916 lwp->status_pending = wstat;
2917
2918 /* Prevent trying to stop it. */
2919 lwp->stopped = 1;
2920
2921 /* No further stops are expected from a dead lwp. */
2922 lwp->stop_expected = 0;
2923 }
2924
2925 static void
2926 wait_for_sigstop (struct inferior_list_entry *entry)
2927 {
2928 struct lwp_info *lwp = (struct lwp_info *) entry;
2929 struct thread_info *saved_inferior;
2930 int wstat;
2931 ptid_t saved_tid;
2932 ptid_t ptid;
2933 int pid;
2934
2935 if (lwp->stopped)
2936 {
2937 if (debug_threads)
2938 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2939 lwpid_of (lwp));
2940 return;
2941 }
2942
2943 saved_inferior = current_inferior;
2944 if (saved_inferior != NULL)
2945 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2946 else
2947 saved_tid = null_ptid; /* avoid bogus unused warning */
2948
2949 ptid = lwp->head.id;
2950
2951 if (debug_threads)
2952 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2953
2954 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2955
2956 /* If we stopped with a non-SIGSTOP signal, save it for later
2957 and record the pending SIGSTOP. If the process exited, just
2958 return. */
2959 if (WIFSTOPPED (wstat))
2960 {
2961 if (debug_threads)
2962 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2963 lwpid_of (lwp), WSTOPSIG (wstat));
2964
2965 if (WSTOPSIG (wstat) != SIGSTOP)
2966 {
2967 if (debug_threads)
2968 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2969 lwpid_of (lwp), wstat);
2970
2971 lwp->status_pending_p = 1;
2972 lwp->status_pending = wstat;
2973 }
2974 }
2975 else
2976 {
2977 if (debug_threads)
2978 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2979
2980 lwp = find_lwp_pid (pid_to_ptid (pid));
2981 if (lwp)
2982 {
2983 /* Leave this status pending for the next time we're able to
2984 	     report it.  In the meantime, we'll report this LWP as
2985 dead to GDB, so GDB doesn't try to read registers and
2986 memory from it. This can only happen if this was the
2987 last thread of the process; otherwise, PID is removed
2988 from the thread tables before linux_wait_for_event
2989 returns. */
2990 mark_lwp_dead (lwp, wstat);
2991 }
2992 }
2993
2994 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2995 current_inferior = saved_inferior;
2996 else
2997 {
2998 if (debug_threads)
2999 fprintf (stderr, "Previously current thread died.\n");
3000
3001 if (non_stop)
3002 {
3003 /* We can't change the current inferior behind GDB's back,
3004 otherwise, a subsequent command may apply to the wrong
3005 process. */
3006 current_inferior = NULL;
3007 }
3008 else
3009 {
3010 /* Set a valid thread as current. */
3011 set_desired_inferior (0);
3012 }
3013 }
3014 }
3015
3016 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3017 move it out, because we need to report the stop event to GDB. For
3018 example, if the user puts a breakpoint in the jump pad, it's
3019 because she wants to debug it. */
3020
3021 static int
3022 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3023 {
3024 struct lwp_info *lwp = (struct lwp_info *) entry;
3025 struct thread_info *thread = get_lwp_thread (lwp);
3026
3027 gdb_assert (lwp->suspended == 0);
3028 gdb_assert (lwp->stopped);
3029
3030 /* Allow debugging the jump pad, gdb_collect, etc.. */
3031 return (supports_fast_tracepoints ()
3032 && agent_loaded_p ()
3033 && (gdb_breakpoint_here (lwp->stop_pc)
3034 || lwp->stopped_by_watchpoint
3035 || thread->last_resume_kind == resume_step)
3036 && linux_fast_tracepoint_collecting (lwp, NULL));
3037 }
3038
3039 static void
3040 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3041 {
3042 struct lwp_info *lwp = (struct lwp_info *) entry;
3043 struct thread_info *thread = get_lwp_thread (lwp);
3044 int *wstat;
3045
3046 gdb_assert (lwp->suspended == 0);
3047 gdb_assert (lwp->stopped);
3048
3049 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3050
3051 /* Allow debugging the jump pad, gdb_collect, etc. */
3052 if (!gdb_breakpoint_here (lwp->stop_pc)
3053 && !lwp->stopped_by_watchpoint
3054 && thread->last_resume_kind != resume_step
3055 && maybe_move_out_of_jump_pad (lwp, wstat))
3056 {
3057 if (debug_threads)
3058 fprintf (stderr,
3059 "LWP %ld needs stabilizing (in jump pad)\n",
3060 lwpid_of (lwp));
3061
3062 if (wstat)
3063 {
3064 lwp->status_pending_p = 0;
3065 enqueue_one_deferred_signal (lwp, wstat);
3066
3067 if (debug_threads)
3068 fprintf (stderr,
3069 "Signal %d for LWP %ld deferred "
3070 "(in jump pad)\n",
3071 WSTOPSIG (*wstat), lwpid_of (lwp));
3072 }
3073
3074 linux_resume_one_lwp (lwp, 0, 0, NULL);
3075 }
3076 else
3077 lwp->suspended++;
3078 }
3079
3080 static int
3081 lwp_running (struct inferior_list_entry *entry, void *data)
3082 {
3083 struct lwp_info *lwp = (struct lwp_info *) entry;
3084
3085 if (lwp->dead)
3086 return 0;
3087 if (lwp->stopped)
3088 return 0;
3089 return 1;
3090 }
3091
3092 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3093 If SUSPEND, then also increase the suspend count of every LWP,
3094 except EXCEPT. */
3095
3096 static void
3097 stop_all_lwps (int suspend, struct lwp_info *except)
3098 {
3099 /* Should not be called recursively. */
3100 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3101
3102 stopping_threads = (suspend
3103 ? STOPPING_AND_SUSPENDING_THREADS
3104 : STOPPING_THREADS);
3105
3106 if (suspend)
3107 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3108 else
3109 find_inferior (&all_lwps, send_sigstop_callback, except);
3110 for_each_inferior (&all_lwps, wait_for_sigstop);
3111 stopping_threads = NOT_STOPPING_THREADS;
3112 }
3113
3114 /* Resume execution of the inferior process.
3115 If STEP is nonzero, single-step it.
3116 If SIGNAL is nonzero, give it that signal. */
3117
3118 static void
3119 linux_resume_one_lwp (struct lwp_info *lwp,
3120 int step, int signal, siginfo_t *info)
3121 {
3122 struct thread_info *saved_inferior;
3123 int fast_tp_collecting;
3124
3125 if (lwp->stopped == 0)
3126 return;
3127
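  /* COLLECTING_FAST_TRACEPOINT is three-valued: 0 means the LWP is
     not collecting; 1 means it is in the jump pad and will leave via
     the exit-jump-pad breakpoint; 2 means it is at the relocated
     instruction and must be single-stepped out, as the debug
     messages below describe.  */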
3128 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3129
3130 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3131
3132 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3133 user used the "jump" command, or "set $pc = foo"). */
3134 if (lwp->stop_pc != get_pc (lwp))
3135 {
3136 /* Collecting 'while-stepping' actions doesn't make sense
3137 anymore. */
3138 release_while_stepping_state_list (get_lwp_thread (lwp));
3139 }
3140
3141 /* If we have pending signals or status, and a new signal, enqueue the
3142 signal. Also enqueue the signal if we are waiting to reinsert a
3143 breakpoint; it will be picked up again below. */
3144 if (signal != 0
3145 && (lwp->status_pending_p
3146 || lwp->pending_signals != NULL
3147 || lwp->bp_reinsert != 0
3148 || fast_tp_collecting))
3149 {
3150 struct pending_signals *p_sig;
3151 p_sig = xmalloc (sizeof (*p_sig));
3152 p_sig->prev = lwp->pending_signals;
3153 p_sig->signal = signal;
3154 if (info == NULL)
3155 memset (&p_sig->info, 0, sizeof (siginfo_t));
3156 else
3157 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3158 lwp->pending_signals = p_sig;
3159 }
3160
3161 if (lwp->status_pending_p)
3162 {
3163 if (debug_threads)
3164 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3165 " has pending status\n",
3166 lwpid_of (lwp), step ? "step" : "continue", signal,
3167 lwp->stop_expected ? "expected" : "not expected");
3168 return;
3169 }
3170
3171 saved_inferior = current_inferior;
3172 current_inferior = get_lwp_thread (lwp);
3173
3174 if (debug_threads)
3175 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3176 lwpid_of (lwp), step ? "step" : "continue", signal,
3177 lwp->stop_expected ? "expected" : "not expected");
3178
3179 /* This bit needs some thinking about. If we get a signal that
3180 we must report while a single-step reinsert is still pending,
3181 we often end up resuming the thread. It might be better to
3182 (ew) allow a stack of pending events; then we could be sure that
3183 the reinsert happened right away and not lose any signals.
3184
3185 Making this stack would also shrink the window in which breakpoints are
3186 uninserted (see comment in linux_wait_for_lwp) but not enough for
3187 complete correctness, so it won't solve that problem. It may be
3188 worthwhile just to solve this one, however. */
3189 if (lwp->bp_reinsert != 0)
3190 {
3191 if (debug_threads)
3192 fprintf (stderr, " pending reinsert at 0x%s\n",
3193 paddress (lwp->bp_reinsert));
3194
3195 if (can_hardware_single_step ())
3196 {
3197 if (fast_tp_collecting == 0)
3198 {
3199 if (step == 0)
3200 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3201 if (lwp->suspended)
3202 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3203 lwp->suspended);
3204 }
3205
3206 step = 1;
3207 }
3208
3209 /* Postpone any pending signal. It was enqueued above. */
3210 signal = 0;
3211 }
3212
3213 if (fast_tp_collecting == 1)
3214 {
3215 if (debug_threads)
3216 fprintf (stderr, "\
3217 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3218 lwpid_of (lwp));
3219
3220 /* Postpone any pending signal. It was enqueued above. */
3221 signal = 0;
3222 }
3223 else if (fast_tp_collecting == 2)
3224 {
3225 if (debug_threads)
3226 fprintf (stderr, "\
3227 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3228 lwpid_of (lwp));
3229
3230 if (can_hardware_single_step ())
3231 step = 1;
3232 else
3233 fatal ("moving out of jump pad single-stepping"
3234 " not implemented on this target");
3235
3236 /* Postpone any pending signal. It was enqueued above. */
3237 signal = 0;
3238 }
3239
3240   /* If we have while-stepping actions in this thread, set it stepping.
3241 If we have a signal to deliver, it may or may not be set to
3242 SIG_IGN, we don't know. Assume so, and allow collecting
3243 while-stepping into a signal handler. A possible smart thing to
3244 do would be to set an internal breakpoint at the signal return
3245 address, continue, and carry on catching this while-stepping
3246 action only when that breakpoint is hit. A future
3247 enhancement. */
3248 if (get_lwp_thread (lwp)->while_stepping != NULL
3249 && can_hardware_single_step ())
3250 {
3251 if (debug_threads)
3252 fprintf (stderr,
3253 "lwp %ld has a while-stepping action -> forcing step.\n",
3254 lwpid_of (lwp));
3255 step = 1;
3256 }
3257
3258 if (debug_threads && the_low_target.get_pc != NULL)
3259 {
3260 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3261 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3262 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3263 }
3264
3265 /* If we have pending signals, consume one unless we are trying to
3266 reinsert a breakpoint or we're trying to finish a fast tracepoint
3267 collect. */
3268 if (lwp->pending_signals != NULL
3269 && lwp->bp_reinsert == 0
3270 && fast_tp_collecting == 0)
3271 {
3272 struct pending_signals **p_sig;
3273
3274 p_sig = &lwp->pending_signals;
3275 while ((*p_sig)->prev != NULL)
3276 p_sig = &(*p_sig)->prev;
3277
3278 signal = (*p_sig)->signal;
3279 if ((*p_sig)->info.si_signo != 0)
3280 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3281
3282 free (*p_sig);
3283 *p_sig = NULL;
3284 }
3285
3286 if (the_low_target.prepare_to_resume != NULL)
3287 the_low_target.prepare_to_resume (lwp);
3288
3289 regcache_invalidate_one ((struct inferior_list_entry *)
3290 get_lwp_thread (lwp));
3291 errno = 0;
3292 lwp->stopped = 0;
3293 lwp->stopped_by_watchpoint = 0;
3294 lwp->stepping = step;
3295 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3296 /* Coerce to a uintptr_t first to avoid potential gcc warning
3297 of coercing an 8 byte integer to a 4 byte pointer. */
3298 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3299
3300 current_inferior = saved_inferior;
3301 if (errno)
3302 {
3303 /* ESRCH from ptrace either means that the thread was already
3304 running (an error) or that it is gone (a race condition). If
3305 it's gone, we will get a notification the next time we wait,
3306 so we can ignore the error. We could differentiate these
3307 two, but it's tricky without waiting; the thread still exists
3308 as a zombie, so sending it signal 0 would succeed. So just
3309 ignore ESRCH. */
3310 if (errno == ESRCH)
3311 return;
3312
3313 perror_with_name ("ptrace");
3314 }
3315 }
3316
3317 struct thread_resume_array
3318 {
3319 struct thread_resume *resume;
3320 size_t n;
3321 };
3322
3323 /* This function is called once per thread. We look up the thread
3324 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3325 resume request.
3326
3327    This algorithm is O(threads * resume elements), but the number of
3328    resume elements is small (and will remain small at least until GDB
3329    supports thread suspension).  */
3330 static int
3331 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3332 {
3333 struct lwp_info *lwp;
3334 struct thread_info *thread;
3335 int ndx;
3336 struct thread_resume_array *r;
3337
3338 thread = (struct thread_info *) entry;
3339 lwp = get_thread_lwp (thread);
3340 r = arg;
3341
3342 for (ndx = 0; ndx < r->n; ndx++)
3343 {
3344 ptid_t ptid = r->resume[ndx].thread;
3345 if (ptid_equal (ptid, minus_one_ptid)
3346 || ptid_equal (ptid, entry->id)
3347 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3348 of PID'. */
3349 || (ptid_get_pid (ptid) == pid_of (lwp)
3350 && (ptid_is_pid (ptid)
3351 || ptid_get_lwp (ptid) == -1)))
3352 {
3353 if (r->resume[ndx].kind == resume_stop
3354 && thread->last_resume_kind == resume_stop)
3355 {
3356 if (debug_threads)
3357 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3358 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3359 ? "stopped"
3360 : "stopping",
3361 lwpid_of (lwp));
3362
3363 continue;
3364 }
3365
3366 lwp->resume = &r->resume[ndx];
3367 thread->last_resume_kind = lwp->resume->kind;
3368
3369 /* If we had a deferred signal to report, dequeue one now.
3370 This can happen if LWP gets more than one signal while
3371 trying to get out of a jump pad. */
3372 if (lwp->stopped
3373 && !lwp->status_pending_p
3374 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3375 {
3376 lwp->status_pending_p = 1;
3377
3378 if (debug_threads)
3379 fprintf (stderr,
3380 "Dequeueing deferred signal %d for LWP %ld, "
3381 "leaving status pending.\n",
3382 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3383 }
3384
3385 return 0;
3386 }
3387 }
3388
3389 /* No resume action for this thread. */
3390 lwp->resume = NULL;
3391
3392 return 0;
3393 }
3394
3395
3396 /* Set *FLAG_P if this lwp has an interesting status pending. */
3397 static int
3398 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3399 {
3400 struct lwp_info *lwp = (struct lwp_info *) entry;
3401
3402 /* LWPs which will not be resumed are not interesting, because
3403 we might not wait for them next time through linux_wait. */
3404 if (lwp->resume == NULL)
3405 return 0;
3406
3407 if (lwp->status_pending_p)
3408 * (int *) flag_p = 1;
3409
3410 return 0;
3411 }
3412
3413 /* Return 1 if this lwp that GDB wants running is stopped at an
3414 internal breakpoint that we need to step over. It assumes that any
3415 required STOP_PC adjustment has already been propagated to the
3416 inferior's regcache. */
3417
3418 static int
3419 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3420 {
3421 struct lwp_info *lwp = (struct lwp_info *) entry;
3422 struct thread_info *thread;
3423 struct thread_info *saved_inferior;
3424 CORE_ADDR pc;
3425
3426 /* LWPs which will not be resumed are not interesting, because we
3427 might not wait for them next time through linux_wait. */
3428
3429 if (!lwp->stopped)
3430 {
3431 if (debug_threads)
3432 fprintf (stderr,
3433 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3434 lwpid_of (lwp));
3435 return 0;
3436 }
3437
3438 thread = get_lwp_thread (lwp);
3439
3440 if (thread->last_resume_kind == resume_stop)
3441 {
3442 if (debug_threads)
3443 fprintf (stderr,
3444 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3445 lwpid_of (lwp));
3446 return 0;
3447 }
3448
3449 gdb_assert (lwp->suspended >= 0);
3450
3451 if (lwp->suspended)
3452 {
3453 if (debug_threads)
3454 fprintf (stderr,
3455 "Need step over [LWP %ld]? Ignoring, suspended\n",
3456 lwpid_of (lwp));
3457 return 0;
3458 }
3459
3460 if (!lwp->need_step_over)
3461 {
3462 if (debug_threads)
3463 fprintf (stderr,
3464 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3465 }
3466
3467 if (lwp->status_pending_p)
3468 {
3469 if (debug_threads)
3470 fprintf (stderr,
3471 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3472 lwpid_of (lwp));
3473 return 0;
3474 }
3475
3476 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3477 or we have. */
3478 pc = get_pc (lwp);
3479
3480 /* If the PC has changed since we stopped, then don't do anything,
3481 and let the breakpoint/tracepoint be hit. This happens if, for
3482 instance, GDB handled the decr_pc_after_break subtraction itself,
3483 GDB is OOL stepping this thread, or the user has issued a "jump"
3484      command, or poked the thread's registers herself.  */
3485 if (pc != lwp->stop_pc)
3486 {
3487 if (debug_threads)
3488 fprintf (stderr,
3489 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3490 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3491 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3492
3493 lwp->need_step_over = 0;
3494 return 0;
3495 }
3496
3497 saved_inferior = current_inferior;
3498 current_inferior = thread;
3499
3500 /* We can only step over breakpoints we know about. */
3501 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3502 {
3503       /* Don't step over a breakpoint that GDB expects to hit
3504 	 though.  If the condition is being evaluated on the target's side
3505 	 and it evaluates to false, step over this breakpoint as well.  */
3506 if (gdb_breakpoint_here (pc)
3507 && gdb_condition_true_at_breakpoint (pc)
3508 && gdb_no_commands_at_breakpoint (pc))
3509 {
3510 if (debug_threads)
3511 fprintf (stderr,
3512 "Need step over [LWP %ld]? yes, but found"
3513 " GDB breakpoint at 0x%s; skipping step over\n",
3514 lwpid_of (lwp), paddress (pc));
3515
3516 current_inferior = saved_inferior;
3517 return 0;
3518 }
3519 else
3520 {
3521 if (debug_threads)
3522 fprintf (stderr,
3523 "Need step over [LWP %ld]? yes, "
3524 "found breakpoint at 0x%s\n",
3525 lwpid_of (lwp), paddress (pc));
3526
3527 /* We've found an lwp that needs stepping over --- return 1 so
3528 that find_inferior stops looking. */
3529 current_inferior = saved_inferior;
3530
3531 /* If the step over is cancelled, this is set again. */
3532 lwp->need_step_over = 0;
3533 return 1;
3534 }
3535 }
3536
3537 current_inferior = saved_inferior;
3538
3539 if (debug_threads)
3540 fprintf (stderr,
3541 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3542 lwpid_of (lwp), paddress (pc));
3543
3544 return 0;
3545 }
3546
3547 /* Start a step-over operation on LWP.  When LWP stops at a
3548    breakpoint, to make progress, we need to get the breakpoint out
3549    of the way.  If we let other threads run while we do that, they may
3550    pass by the breakpoint location and miss hitting it.  To avoid
3551    that, a step-over momentarily stops all threads while LWP is
3552    single-stepped with the breakpoint temporarily uninserted from
3553    the inferior.  When the single-step finishes, we reinsert the
3554    breakpoint, and let all threads that are supposed to be running
3555    run again.
3556
3557 On targets that don't support hardware single-step, we don't
3558 currently support full software single-stepping. Instead, we only
3559 support stepping over the thread event breakpoint, by asking the
3560 low target where to place a reinsert breakpoint. Since this
3561 routine assumes the breakpoint being stepped over is a thread event
3562 breakpoint, it usually assumes the return address of the current
3563 function is a good enough place to set the reinsert breakpoint. */
3564
3565 static int
3566 start_step_over (struct lwp_info *lwp)
3567 {
3568 struct thread_info *saved_inferior;
3569 CORE_ADDR pc;
3570 int step;
3571
3572 if (debug_threads)
3573 fprintf (stderr,
3574 "Starting step-over on LWP %ld. Stopping all threads\n",
3575 lwpid_of (lwp));
3576
3577 stop_all_lwps (1, lwp);
3578 gdb_assert (lwp->suspended == 0);
3579
3580 if (debug_threads)
3581 fprintf (stderr, "Done stopping all threads for step-over.\n");
3582
3583 /* Note, we should always reach here with an already adjusted PC,
3584 either by GDB (if we're resuming due to GDB's request), or by our
3585 caller, if we just finished handling an internal breakpoint GDB
3586 shouldn't care about. */
3587 pc = get_pc (lwp);
3588
3589 saved_inferior = current_inferior;
3590 current_inferior = get_lwp_thread (lwp);
3591
3592 lwp->bp_reinsert = pc;
3593 uninsert_breakpoints_at (pc);
3594 uninsert_fast_tracepoint_jumps_at (pc);
3595
3596 if (can_hardware_single_step ())
3597 {
3598 step = 1;
3599 }
3600 else
3601 {
3602 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3603 set_reinsert_breakpoint (raddr);
3604 step = 0;
3605 }
3606
3607 current_inferior = saved_inferior;
3608
3609 linux_resume_one_lwp (lwp, step, 0, NULL);
3610
3611 /* Require next event from this LWP. */
3612 step_over_bkpt = lwp->head.id;
3613 return 1;
3614 }
3615
3616 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3617 start_step_over, if still there, and delete any reinsert
3618 breakpoints we've set, on non hardware single-step targets. */
3619
3620 static int
3621 finish_step_over (struct lwp_info *lwp)
3622 {
3623 if (lwp->bp_reinsert != 0)
3624 {
3625 if (debug_threads)
3626 fprintf (stderr, "Finished step over.\n");
3627
3628 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3629 may be no breakpoint to reinsert there by now. */
3630 reinsert_breakpoints_at (lwp->bp_reinsert);
3631 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3632
3633 lwp->bp_reinsert = 0;
3634
3635 /* Delete any software-single-step reinsert breakpoints. No
3636 longer needed. We don't have to worry about other threads
3637 hitting this trap, and later not being able to explain it,
3638 because we were stepping over a breakpoint, and we hold all
3639 threads but LWP stopped while doing that. */
3640 if (!can_hardware_single_step ())
3641 delete_reinsert_breakpoints ();
3642
3643 step_over_bkpt = null_ptid;
3644 return 1;
3645 }
3646 else
3647 return 0;
3648 }
3649
3650 /* This function is called once per thread. We check the thread's resume
3651 request, which will tell us whether to resume, step, or leave the thread
3652 stopped; and what signal, if any, it should be sent.
3653
3654 For threads which we aren't explicitly told otherwise, we preserve
3655 the stepping flag; this is used for stepping over gdbserver-placed
3656 breakpoints.
3657
3658 If pending_flags was set in any thread, we queue any needed
3659 signals, since we won't actually resume. We already have a pending
3660 event to report, so we don't need to preserve any step requests;
3661 they should be re-issued if necessary. */
3662
3663 static int
3664 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3665 {
3666 struct lwp_info *lwp;
3667 struct thread_info *thread;
3668 int step;
3669 int leave_all_stopped = * (int *) arg;
3670 int leave_pending;
3671
3672 thread = (struct thread_info *) entry;
3673 lwp = get_thread_lwp (thread);
3674
3675 if (lwp->resume == NULL)
3676 return 0;
3677
3678 if (lwp->resume->kind == resume_stop)
3679 {
3680 if (debug_threads)
3681 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3682
3683 if (!lwp->stopped)
3684 {
3685 if (debug_threads)
3686 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3687
3688 /* Stop the thread, and wait for the event asynchronously,
3689 through the event loop. */
3690 send_sigstop (lwp);
3691 }
3692 else
3693 {
3694 if (debug_threads)
3695 fprintf (stderr, "already stopped LWP %ld\n",
3696 lwpid_of (lwp));
3697
3698 /* The LWP may have been stopped in an internal event that
3699 was not meant to be notified back to GDB (e.g., gdbserver
3700 breakpoint), so we should be reporting a stop event in
3701 this case too. */
3702
3703 /* If the thread already has a pending SIGSTOP, this is a
3704 no-op. Otherwise, something later will presumably resume
3705 the thread and this will cause it to cancel any pending
3706 operation, due to last_resume_kind == resume_stop. If
3707 the thread already has a pending status to report, we
3708 will still report it the next time we wait - see
3709 status_pending_p_callback. */
3710
3711 /* If we already have a pending signal to report, then
3712 there's no need to queue a SIGSTOP, as this means we're
3713 midway through moving the LWP out of the jumppad, and we
3714 will report the pending signal as soon as that is
3715 finished. */
3716 if (lwp->pending_signals_to_report == NULL)
3717 send_sigstop (lwp);
3718 }
3719
3720 /* For stop requests, we're done. */
3721 lwp->resume = NULL;
3722 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3723 return 0;
3724 }
3725
3726 /* If this thread which is about to be resumed has a pending status,
3727 then don't resume any threads - we can just report the pending
3728 status. Make sure to queue any signals that would otherwise be
3729      sent.  In all-stop mode, we base this decision on whether *any*
3730 thread has a pending status. If there's a thread that needs the
3731 step-over-breakpoint dance, then don't resume any other thread
3732 but that particular one. */
3733 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3734
3735 if (!leave_pending)
3736 {
3737 if (debug_threads)
3738 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3739
3740 step = (lwp->resume->kind == resume_step);
3741 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3742 }
3743 else
3744 {
3745 if (debug_threads)
3746 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3747
3748 /* If we have a new signal, enqueue the signal. */
3749 if (lwp->resume->sig != 0)
3750 {
3751 struct pending_signals *p_sig;
3752 p_sig = xmalloc (sizeof (*p_sig));
3753 p_sig->prev = lwp->pending_signals;
3754 p_sig->signal = lwp->resume->sig;
3755 memset (&p_sig->info, 0, sizeof (siginfo_t));
3756
3757 /* If this is the same signal we were previously stopped by,
3758 make sure to queue its siginfo. We can ignore the return
3759 value of ptrace; if it fails, we'll skip
3760 PTRACE_SETSIGINFO. */
3761 if (WIFSTOPPED (lwp->last_status)
3762 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3763 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3764
3765 lwp->pending_signals = p_sig;
3766 }
3767 }
3768
3769 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3770 lwp->resume = NULL;
3771 return 0;
3772 }
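
/* A note on the pending-signals queue used above: the enqueue pushes at
   the head of a singly linked list, and the consumer in
   linux_resume_one_lwp walks the prev chain to the tail so signals are
   delivered oldest-first.  A minimal sketch of that pattern is kept out
   of the build below; the example_* names and the simplified struct are
   illustrative, not gdbserver's actual definitions.  */
#if 0
#include <stdlib.h>

struct example_pending_signal
{
  int signal;
  struct example_pending_signal *prev;
};

/* Push SIG at the head, newest first.  */
static void
example_enqueue (struct example_pending_signal **list, int sig)
{
  struct example_pending_signal *p = malloc (sizeof (*p));

  p->signal = sig;
  p->prev = *list;
  *list = p;
}

/* Pop the oldest entry (the tail of the prev chain).  Returns 0 when
   the list is empty.  */
static int
example_dequeue (struct example_pending_signal **list)
{
  int sig;

  if (*list == NULL)
    return 0;
  while ((*list)->prev != NULL)
    list = &(*list)->prev;
  sig = (*list)->signal;
  free (*list);
  *list = NULL;
  return sig;
}
#endif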
3773
3774 static void
3775 linux_resume (struct thread_resume *resume_info, size_t n)
3776 {
3777 struct thread_resume_array array = { resume_info, n };
3778 struct lwp_info *need_step_over = NULL;
3779 int any_pending;
3780 int leave_all_stopped;
3781
3782 find_inferior (&all_threads, linux_set_resume_request, &array);
3783
3784 /* If there is a thread which would otherwise be resumed, which has
3785 a pending status, then don't resume any threads - we can just
3786 report the pending status. Make sure to queue any signals that
3787 would otherwise be sent. In non-stop mode, we'll apply this
3788 logic to each thread individually. We consume all pending events
3789      before considering starting a step-over (in all-stop).  */
3790 any_pending = 0;
3791 if (!non_stop)
3792 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3793
3794 /* If there is a thread which would otherwise be resumed, which is
3795 stopped at a breakpoint that needs stepping over, then don't
3796 resume any threads - have it step over the breakpoint with all
3797 other threads stopped, then resume all threads again. Make sure
3798 to queue any signals that would otherwise be delivered or
3799 queued. */
3800 if (!any_pending && supports_breakpoints ())
3801 need_step_over
3802 = (struct lwp_info *) find_inferior (&all_lwps,
3803 need_step_over_p, NULL);
3804
3805 leave_all_stopped = (need_step_over != NULL || any_pending);
3806
3807 if (debug_threads)
3808 {
3809 if (need_step_over != NULL)
3810 fprintf (stderr, "Not resuming all, need step over\n");
3811 else if (any_pending)
3812 fprintf (stderr,
3813 "Not resuming, all-stop and found "
3814 "an LWP with pending status\n");
3815 else
3816 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3817 }
3818
3819 /* Even if we're leaving threads stopped, queue all signals we'd
3820 otherwise deliver. */
3821 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3822
3823 if (need_step_over)
3824 start_step_over (need_step_over);
3825 }
3826
3827 /* This function is called once per thread. We check the thread's
3828 last resume request, which will tell us whether to resume, step, or
3829 leave the thread stopped. Any signal the client requested to be
3830 delivered has already been enqueued at this point.
3831
3832 If any thread that GDB wants running is stopped at an internal
3833 breakpoint that needs stepping over, we start a step-over operation
3834 on that particular thread, and leave all others stopped. */
3835
3836 static int
3837 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3838 {
3839 struct lwp_info *lwp = (struct lwp_info *) entry;
3840 struct thread_info *thread;
3841 int step;
3842
3843 if (lwp == except)
3844 return 0;
3845
3846 if (debug_threads)
3847 fprintf (stderr,
3848 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3849
3850 if (!lwp->stopped)
3851 {
3852 if (debug_threads)
3853 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3854 return 0;
3855 }
3856
3857 thread = get_lwp_thread (lwp);
3858
3859 if (thread->last_resume_kind == resume_stop
3860 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3861 {
3862 if (debug_threads)
3863 	fprintf (stderr, "   client wants LWP %ld to remain stopped\n",
3864 lwpid_of (lwp));
3865 return 0;
3866 }
3867
3868 if (lwp->status_pending_p)
3869 {
3870 if (debug_threads)
3871 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3872 lwpid_of (lwp));
3873 return 0;
3874 }
3875
3876 gdb_assert (lwp->suspended >= 0);
3877
3878 if (lwp->suspended)
3879 {
3880 if (debug_threads)
3881 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3882 return 0;
3883 }
3884
3885 if (thread->last_resume_kind == resume_stop
3886 && lwp->pending_signals_to_report == NULL
3887 && lwp->collecting_fast_tracepoint == 0)
3888 {
3889 /* We haven't reported this LWP as stopped yet (otherwise, the
3890 last_status.kind check above would catch it, and we wouldn't
3891 	 reach here).  This LWP may have been momentarily paused by a
3892 	 stop_all_lwps call while handling, for example, another LWP's
3893 step-over. In that case, the pending expected SIGSTOP signal
3894 that was queued at vCont;t handling time will have already
3895 been consumed by wait_for_sigstop, and so we need to requeue
3896 another one here. Note that if the LWP already has a SIGSTOP
3897 pending, this is a no-op. */
3898
3899 if (debug_threads)
3900 fprintf (stderr,
3901 "Client wants LWP %ld to stop. "
3902 "Making sure it has a SIGSTOP pending\n",
3903 lwpid_of (lwp));
3904
3905 send_sigstop (lwp);
3906 }
3907
3908 step = thread->last_resume_kind == resume_step;
3909 linux_resume_one_lwp (lwp, step, 0, NULL);
3910 return 0;
3911 }
3912
3913 static int
3914 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3915 {
3916 struct lwp_info *lwp = (struct lwp_info *) entry;
3917
3918 if (lwp == except)
3919 return 0;
3920
3921 lwp->suspended--;
3922 gdb_assert (lwp->suspended >= 0);
3923
3924 return proceed_one_lwp (entry, except);
3925 }
3926
3927 /* When we finish a step-over, set threads running again. If there's
3928 another thread that may need a step-over, now's the time to start
3929 it. Eventually, we'll move all threads past their breakpoints. */
3930
3931 static void
3932 proceed_all_lwps (void)
3933 {
3934 struct lwp_info *need_step_over;
3935
3936 /* If there is a thread which would otherwise be resumed, which is
3937 stopped at a breakpoint that needs stepping over, then don't
3938 resume any threads - have it step over the breakpoint with all
3939 other threads stopped, then resume all threads again. */
3940
3941 if (supports_breakpoints ())
3942 {
3943 need_step_over
3944 = (struct lwp_info *) find_inferior (&all_lwps,
3945 need_step_over_p, NULL);
3946
3947 if (need_step_over != NULL)
3948 {
3949 if (debug_threads)
3950 fprintf (stderr, "proceed_all_lwps: found "
3951 "thread %ld needing a step-over\n",
3952 lwpid_of (need_step_over));
3953
3954 start_step_over (need_step_over);
3955 return;
3956 }
3957 }
3958
3959 if (debug_threads)
3960 fprintf (stderr, "Proceeding, no step-over needed\n");
3961
3962 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3963 }
3964
3965 /* Stopped LWPs that the client wanted to be running and that don't
3966    have pending statuses are set to run again, except for EXCEPT if it
3967    is not NULL.  This undoes a stop_all_lwps call.  */
3968
3969 static void
3970 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3971 {
3972 if (debug_threads)
3973 {
3974 if (except)
3975 fprintf (stderr,
3976 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3977 else
3978 fprintf (stderr,
3979 "unstopping all lwps\n");
3980 }
3981
3982 if (unsuspend)
3983 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3984 else
3985 find_inferior (&all_lwps, proceed_one_lwp, except);
3986 }
3987
3988
3989 #ifdef HAVE_LINUX_REGSETS
3990
3991 #define use_linux_regsets 1
3992
3993 static int
3994 regsets_fetch_inferior_registers (struct regcache *regcache)
3995 {
3996 struct regset_info *regset;
3997 int saw_general_regs = 0;
3998 int pid;
3999 struct iovec iov;
4000
4001 regset = target_regsets;
4002
4003 pid = lwpid_of (get_thread_lwp (current_inferior));
4004 while (regset->size >= 0)
4005 {
4006 void *buf, *data;
4007 int nt_type, res;
4008
4009 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4010 {
4011 regset ++;
4012 continue;
4013 }
4014
4015 buf = xmalloc (regset->size);
4016
4017 nt_type = regset->nt_type;
4018 if (nt_type)
4019 {
4020 iov.iov_base = buf;
4021 iov.iov_len = regset->size;
4022 data = (void *) &iov;
4023 }
4024 else
4025 data = buf;
4026
4027 #ifndef __sparc__
4028 res = ptrace (regset->get_request, pid,
4029 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4030 #else
4031 res = ptrace (regset->get_request, pid, data, nt_type);
4032 #endif
4033 if (res < 0)
4034 {
4035 if (errno == EIO)
4036 {
4037 /* If we get EIO on a regset, do not try it again for
4038 this process. */
4039 disabled_regsets[regset - target_regsets] = 1;
4040 free (buf);
4041 continue;
4042 }
4043 else
4044 {
4045 char s[256];
4046 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4047 pid);
4048 perror (s);
4049 }
4050 }
4051 else if (regset->type == GENERAL_REGS)
4052 saw_general_regs = 1;
4053 regset->store_function (regcache, buf);
4054 regset ++;
4055 free (buf);
4056 }
4057 if (saw_general_regs)
4058 return 0;
4059 else
4060 return 1;
4061 }
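
/* The nt_type branch above passes a struct iovec so the kernel can
   report how many bytes it actually filled in.  Below is a minimal
   standalone sketch of the same mechanism using the PTRACE_GETREGSET
   request (available on Linux 2.6.34 and later -- an assumption about
   the host kernel; example_fetch_gregs is an illustrative name).  Kept
   out of the build.  */
#if 0
#include <elf.h>		/* For NT_PRSTATUS.  */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

/* Fetch PID's general registers; the kernel trims iov_len to the
   number of bytes written.  Returns 0 on success, -1 on error.  */
static int
example_fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  if (ptrace (PTRACE_GETREGSET, pid, (void *) (long) NT_PRSTATUS, &iov) < 0)
    return -1;
  return 0;
}
#endif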
4062
4063 static int
4064 regsets_store_inferior_registers (struct regcache *regcache)
4065 {
4066 struct regset_info *regset;
4067 int saw_general_regs = 0;
4068 int pid;
4069 struct iovec iov;
4070
4071 regset = target_regsets;
4072
4073 pid = lwpid_of (get_thread_lwp (current_inferior));
4074 while (regset->size >= 0)
4075 {
4076 void *buf, *data;
4077 int nt_type, res;
4078
4079 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4080 {
4081 regset ++;
4082 continue;
4083 }
4084
4085 buf = xmalloc (regset->size);
4086
4087 /* First fill the buffer with the current register set contents,
4088 in case there are any items in the kernel's regset that are
4089 not in gdbserver's regcache. */
4090
4091 nt_type = regset->nt_type;
4092 if (nt_type)
4093 {
4094 iov.iov_base = buf;
4095 iov.iov_len = regset->size;
4096 data = (void *) &iov;
4097 }
4098 else
4099 data = buf;
4100
4101 #ifndef __sparc__
4102 res = ptrace (regset->get_request, pid,
4103 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4104 #else
4105 res = ptrace (regset->get_request, pid, data, nt_type);
4106 #endif
4107
4108 if (res == 0)
4109 {
4110 /* Then overlay our cached registers on that. */
4111 regset->fill_function (regcache, buf);
4112
4113 /* Only now do we write the register set. */
4114 #ifndef __sparc__
4115 res = ptrace (regset->set_request, pid,
4116 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4117 #else
4118 res = ptrace (regset->set_request, pid, data, nt_type);
4119 #endif
4120 }
4121
4122 if (res < 0)
4123 {
4124 if (errno == EIO)
4125 {
4126 /* If we get EIO on a regset, do not try it again for
4127 this process. */
4128 disabled_regsets[regset - target_regsets] = 1;
4129 free (buf);
4130 continue;
4131 }
4132 else if (errno == ESRCH)
4133 {
4134 /* At this point, ESRCH should mean the process is
4135 already gone, in which case we simply ignore attempts
4136 to change its registers. See also the related
4137 comment in linux_resume_one_lwp. */
4138 free (buf);
4139 return 0;
4140 }
4141 else
4142 {
4143 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4144 }
4145 }
4146 else if (regset->type == GENERAL_REGS)
4147 saw_general_regs = 1;
4148 regset ++;
4149 free (buf);
4150 }
4151 if (saw_general_regs)
4152 return 0;
4153 else
4154 return 1;
4155 }
4156
4157 #else /* !HAVE_LINUX_REGSETS */
4158
4159 #define use_linux_regsets 0
4160 #define regsets_fetch_inferior_registers(regcache) 1
4161 #define regsets_store_inferior_registers(regcache) 1
4162
4163 #endif
4164
4165 /* Return 1 if register REGNO is supported by one of the regset ptrace
4166 calls or 0 if it has to be transferred individually. */
4167
4168 static int
4169 linux_register_in_regsets (int regno)
4170 {
4171 unsigned char mask = 1 << (regno % 8);
4172 size_t index = regno / 8;
4173
4174 return (use_linux_regsets
4175 && (the_low_target.regset_bitmap == NULL
4176 || (the_low_target.regset_bitmap[index] & mask) != 0));
4177 }
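
/* A worked instance of the bitmap arithmetic above, kept out of the
   build: register numbers map LSB-first into a byte array, so regno 10
   lives in byte 10 / 8 == 1 under mask 1 << (10 % 8) == 4.  */
#if 0
#include <assert.h>

int
main (void)
{
  static const unsigned char bitmap[] = { 0xff, 0x04 };

  assert ((bitmap[10 / 8] & (1 << (10 % 8))) != 0);	/* Regno 10 set.  */
  assert ((bitmap[9 / 8] & (1 << (9 % 8))) == 0);	/* Regno 9 clear.  */
  return 0;
}
#endif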
4178
4179 #ifdef HAVE_LINUX_USRREGS
4180
4181 int
4182 register_addr (int regnum)
4183 {
4184 int addr;
4185
4186 if (regnum < 0 || regnum >= the_low_target.num_regs)
4187 error ("Invalid register number %d.", regnum);
4188
4189 addr = the_low_target.regmap[regnum];
4190
4191 return addr;
4192 }
4193
4194 /* Fetch one register. */
4195 static void
4196 fetch_register (struct regcache *regcache, int regno)
4197 {
4198 CORE_ADDR regaddr;
4199 int i, size;
4200 char *buf;
4201 int pid;
4202
4203 if (regno >= the_low_target.num_regs)
4204 return;
4205 if ((*the_low_target.cannot_fetch_register) (regno))
4206 return;
4207
4208 regaddr = register_addr (regno);
4209 if (regaddr == -1)
4210 return;
4211
4212 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4213 & -sizeof (PTRACE_XFER_TYPE));
4214 buf = alloca (size);
4215
4216 pid = lwpid_of (get_thread_lwp (current_inferior));
4217 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4218 {
4219 errno = 0;
4220 *(PTRACE_XFER_TYPE *) (buf + i) =
4221 ptrace (PTRACE_PEEKUSER, pid,
4222 /* Coerce to a uintptr_t first to avoid potential gcc warning
4223 of coercing an 8 byte integer to a 4 byte pointer. */
4224 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4225 regaddr += sizeof (PTRACE_XFER_TYPE);
4226 if (errno != 0)
4227 error ("reading register %d: %s", regno, strerror (errno));
4228 }
4229
4230 if (the_low_target.supply_ptrace_register)
4231 the_low_target.supply_ptrace_register (regcache, regno, buf);
4232 else
4233 supply_register (regcache, regno, buf);
4234 }
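
/* fetch_register above transfers USER-area data one architecture word
   per syscall, clearing errno before each PTRACE_PEEKUSER because the
   data comes back in the return value, where -1 is also a valid
   register word.  A condensed sketch of that idiom follows;
   example_peekuser is a hypothetical name, and SIZE is assumed to be a
   multiple of sizeof (long).  Kept out of the build.  */
#if 0
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
example_peekuser (pid_t pid, long regaddr, char *buf, size_t size)
{
  size_t i;

  for (i = 0; i < size; i += sizeof (long))
    {
      long word;

      errno = 0;
      word = ptrace (PTRACE_PEEKUSER, pid, (void *) (regaddr + i), 0);
      if (errno != 0)
	return errno;
      memcpy (buf + i, &word, sizeof word);
    }
  return 0;
}
#endif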
4235
4236 /* Store one register. */
4237 static void
4238 store_register (struct regcache *regcache, int regno)
4239 {
4240 CORE_ADDR regaddr;
4241 int i, size;
4242 char *buf;
4243 int pid;
4244
4245 if (regno >= the_low_target.num_regs)
4246 return;
4247 if ((*the_low_target.cannot_store_register) (regno))
4248 return;
4249
4250 regaddr = register_addr (regno);
4251 if (regaddr == -1)
4252 return;
4253
4254 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4255 & -sizeof (PTRACE_XFER_TYPE));
4256 buf = alloca (size);
4257 memset (buf, 0, size);
4258
4259 if (the_low_target.collect_ptrace_register)
4260 the_low_target.collect_ptrace_register (regcache, regno, buf);
4261 else
4262 collect_register (regcache, regno, buf);
4263
4264 pid = lwpid_of (get_thread_lwp (current_inferior));
4265 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4266 {
4267 errno = 0;
4268 ptrace (PTRACE_POKEUSER, pid,
4269 /* Coerce to a uintptr_t first to avoid potential gcc warning
4270 about coercing an 8 byte integer to a 4 byte pointer. */
4271 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4272 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4273 if (errno != 0)
4274 {
4275 /* At this point, ESRCH should mean the process is
4276 already gone, in which case we simply ignore attempts
4277 to change its registers. See also the related
4278 comment in linux_resume_one_lwp. */
4279 if (errno == ESRCH)
4280 return;
4281
4282 if ((*the_low_target.cannot_store_register) (regno) == 0)
4283 error ("writing register %d: %s", regno, strerror (errno));
4284 }
4285 regaddr += sizeof (PTRACE_XFER_TYPE);
4286 }
4287 }
4288
4289 /* Fetch all registers, or just one, from the child process.
4290 If REGNO is -1, do this for all registers, skipping any that are
4291 assumed to have been retrieved by regsets_fetch_inferior_registers,
4292 unless ALL is non-zero.
4293 Otherwise, REGNO specifies which register (so we can save time). */
4294 static void
4295 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4296 {
4297 if (regno == -1)
4298 {
4299 for (regno = 0; regno < the_low_target.num_regs; regno++)
4300 if (all || !linux_register_in_regsets (regno))
4301 fetch_register (regcache, regno);
4302 }
4303 else
4304 fetch_register (regcache, regno);
4305 }
4306
4307 /* Store our register values back into the inferior.
4308 If REGNO is -1, do this for all registers, skipping any that are
4309 assumed to have been saved by regsets_store_inferior_registers,
4310 unless ALL is non-zero.
4311 Otherwise, REGNO specifies which register (so we can save time). */
4312 static void
4313 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4314 {
4315 if (regno == -1)
4316 {
4317 for (regno = 0; regno < the_low_target.num_regs; regno++)
4318 if (all || !linux_register_in_regsets (regno))
4319 store_register (regcache, regno);
4320 }
4321 else
4322 store_register (regcache, regno);
4323 }
4324
4325 #else /* !HAVE_LINUX_USRREGS */
4326
4327 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4328 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4329
4330 #endif
4331
4332
4333 void
4334 linux_fetch_registers (struct regcache *regcache, int regno)
4335 {
4336 int use_regsets;
4337 int all = 0;
4338
4339 if (regno == -1)
4340 {
4341 if (the_low_target.fetch_register != NULL)
4342 for (regno = 0; regno < the_low_target.num_regs; regno++)
4343 (*the_low_target.fetch_register) (regcache, regno);
4344
4345 all = regsets_fetch_inferior_registers (regcache);
4346 usr_fetch_inferior_registers (regcache, -1, all);
4347 }
4348 else
4349 {
4350 if (the_low_target.fetch_register != NULL
4351 && (*the_low_target.fetch_register) (regcache, regno))
4352 return;
4353
4354 use_regsets = linux_register_in_regsets (regno);
4355 if (use_regsets)
4356 all = regsets_fetch_inferior_registers (regcache);
4357 if (!use_regsets || all)
4358 usr_fetch_inferior_registers (regcache, regno, 1);
4359 }
4360 }
4361
4362 void
4363 linux_store_registers (struct regcache *regcache, int regno)
4364 {
4365 int use_regsets;
4366 int all = 0;
4367
4368 if (regno == -1)
4369 {
4370 all = regsets_store_inferior_registers (regcache);
4371 usr_store_inferior_registers (regcache, regno, all);
4372 }
4373 else
4374 {
4375 use_regsets = linux_register_in_regsets (regno);
4376 if (use_regsets)
4377 all = regsets_store_inferior_registers (regcache);
4378 if (!use_regsets || all)
4379 usr_store_inferior_registers (regcache, regno, 1);
4380 }
4381 }
4382
4383
4384 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4385 to debugger memory starting at MYADDR. */
4386
4387 static int
4388 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4389 {
4390 int pid = lwpid_of (get_thread_lwp (current_inferior));
4391 register PTRACE_XFER_TYPE *buffer;
4392 register CORE_ADDR addr;
4393 register int count;
4394 char filename[64];
4395 register int i;
4396 int ret;
4397 int fd;
4398
4399 /* Try using /proc. Don't bother for one word. */
4400 if (len >= 3 * sizeof (long))
4401 {
4402 int bytes;
4403
4404 /* We could keep this file open and cache it - possibly one per
4405 thread. That requires some juggling, but is even faster. */
4406 sprintf (filename, "/proc/%d/mem", pid);
4407 fd = open (filename, O_RDONLY | O_LARGEFILE);
4408 if (fd == -1)
4409 goto no_proc;
4410
4411 /* If pread64 is available, use it. It's faster if the kernel
4412 supports it (only one syscall), and it's 64-bit safe even on
4413 32-bit platforms (for instance, SPARC debugging a SPARC64
4414 application). */
4415 #ifdef HAVE_PREAD64
4416 bytes = pread64 (fd, myaddr, len, memaddr);
4417 #else
4418 bytes = -1;
4419 if (lseek (fd, memaddr, SEEK_SET) != -1)
4420 bytes = read (fd, myaddr, len);
4421 #endif
4422
4423 close (fd);
4424 if (bytes == len)
4425 return 0;
4426
4427 /* Some data was read, we'll try to get the rest with ptrace. */
4428 if (bytes > 0)
4429 {
4430 memaddr += bytes;
4431 myaddr += bytes;
4432 len -= bytes;
4433 }
4434 }
4435
4436 no_proc:
4437 /* Round starting address down to longword boundary. */
4438 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4439 /* Round ending address up; get number of longwords that makes. */
4440 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4441 / sizeof (PTRACE_XFER_TYPE));
4442 /* Allocate buffer of that many longwords. */
4443 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4444
4445   /* Read all the longwords.  */
4446 errno = 0;
4447 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4448 {
4449 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4450 about coercing an 8 byte integer to a 4 byte pointer. */
4451 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4452 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4453 if (errno)
4454 break;
4455 }
4456 ret = errno;
4457
4458 /* Copy appropriate bytes out of the buffer. */
4459 if (i > 0)
4460 {
4461 i *= sizeof (PTRACE_XFER_TYPE);
4462 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4463 memcpy (myaddr,
4464 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4465 i < len ? i : len);
4466 }
4467
4468 return ret;
4469 }
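
/* The fast path above reads /proc/PID/mem in one syscall instead of
   one PTRACE_PEEKTEXT per word.  A condensed sketch using POSIX pread
   (the code above prefers pread64 when configure detects it); note
   that the target generally must be ptrace-stopped for the read to
   succeed.  example_proc_mem_read is an illustrative name, kept out of
   the build.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
example_proc_mem_read (pid_t pid, unsigned long memaddr,
		       void *myaddr, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  /* pread avoids a separate lseek and leaves the fd offset alone.  */
  n = pread (fd, myaddr, len, (off_t) memaddr);
  close (fd);
  return n;
}
#endif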
4470
4471 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4472 memory at MEMADDR. On failure (cannot write to the inferior)
4473 returns the value of errno. */
4474
4475 static int
4476 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4477 {
4478 register int i;
4479 /* Round starting address down to longword boundary. */
4480 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4481 /* Round ending address up; get number of longwords that makes. */
4482 register int count
4483 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4484 / sizeof (PTRACE_XFER_TYPE);
4485
4486 /* Allocate buffer of that many longwords. */
4487 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4488 alloca (count * sizeof (PTRACE_XFER_TYPE));
4489
4490 int pid = lwpid_of (get_thread_lwp (current_inferior));
4491
4492 if (debug_threads)
4493 {
4494 /* Dump up to four bytes. */
4495 unsigned int val = * (unsigned int *) myaddr;
4496 if (len == 1)
4497 val = val & 0xff;
4498 else if (len == 2)
4499 val = val & 0xffff;
4500 else if (len == 3)
4501 val = val & 0xffffff;
4502 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4503 val, (long)memaddr);
4504 }
4505
4506 /* Fill start and end extra bytes of buffer with existing memory data. */
4507
4508 errno = 0;
4509 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4510 about coercing an 8 byte integer to a 4 byte pointer. */
4511 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4512 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4513 if (errno)
4514 return errno;
4515
4516 if (count > 1)
4517 {
4518 errno = 0;
4519 buffer[count - 1]
4520 = ptrace (PTRACE_PEEKTEXT, pid,
4521 /* Coerce to a uintptr_t first to avoid potential gcc warning
4522 about coercing an 8 byte integer to a 4 byte pointer. */
4523 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4524 * sizeof (PTRACE_XFER_TYPE)),
4525 0);
4526 if (errno)
4527 return errno;
4528 }
4529
4530 /* Copy data to be written over corresponding part of buffer. */
4531
4532 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4533 myaddr, len);
4534
4535 /* Write the entire buffer. */
4536
4537 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4538 {
4539 errno = 0;
4540 ptrace (PTRACE_POKETEXT, pid,
4541 /* Coerce to a uintptr_t first to avoid potential gcc warning
4542 about coercing an 8 byte integer to a 4 byte pointer. */
4543 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4544 (PTRACE_ARG4_TYPE) buffer[i]);
4545 if (errno)
4546 return errno;
4547 }
4548
4549 return 0;
4550 }
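
/* Both transfer routines above round the start address down and the
   byte count up to whole PTRACE_XFER_TYPE words.  The same arithmetic
   as a small checkable example (hypothetical helper, kept out of the
   build): a 5-byte write at 0x1003 with 4-byte words touches the two
   words at 0x1000 and 0x1004.  */
#if 0
#include <assert.h>
#include <stddef.h>

static void
example_word_span (unsigned long memaddr, size_t len, size_t word,
		   unsigned long *start, size_t *count)
{
  *start = memaddr & -(unsigned long) word;
  *count = ((memaddr + len) - *start + word - 1) / word;
}

int
main (void)
{
  unsigned long start;
  size_t count;

  example_word_span (0x1003, 5, 4, &start, &count);
  assert (start == 0x1000 && count == 2);
  return 0;
}
#endif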
4551
4552 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4553 static int linux_supports_tracefork_flag;
4554
4555 static void
4556 linux_enable_event_reporting (int pid)
4557 {
4558 if (!linux_supports_tracefork_flag)
4559 return;
4560
4561 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4562 }
4563
4564 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4565
4566 static int
4567 linux_tracefork_grandchild (void *arg)
4568 {
4569 _exit (0);
4570 }
4571
4572 #define STACK_SIZE 4096
4573
4574 static int
4575 linux_tracefork_child (void *arg)
4576 {
4577 ptrace (PTRACE_TRACEME, 0, 0, 0);
4578 kill (getpid (), SIGSTOP);
4579
4580 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4581
4582 if (fork () == 0)
4583 linux_tracefork_grandchild (NULL);
4584
4585 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4586
4587 #ifdef __ia64__
4588 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4589 CLONE_VM | SIGCHLD, NULL);
4590 #else
4591 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4592 CLONE_VM | SIGCHLD, NULL);
4593 #endif
4594
4595 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4596
4597 _exit (0);
4598 }
4599
4600 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4601    sure that we can enable the option, and that it has the desired
4602 effect. */
4603
4604 static void
4605 linux_test_for_tracefork (void)
4606 {
4607 int child_pid, ret, status;
4608 long second_pid;
4609 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4610 char *stack = xmalloc (STACK_SIZE * 4);
4611 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4612
4613 linux_supports_tracefork_flag = 0;
4614
4615 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4616
4617 child_pid = fork ();
4618 if (child_pid == 0)
4619 linux_tracefork_child (NULL);
4620
4621 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4622
4623 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4624 #ifdef __ia64__
4625 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4626 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4627 #else /* !__ia64__ */
4628 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4629 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4630 #endif /* !__ia64__ */
4631
4632 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4633
4634 if (child_pid == -1)
4635 perror_with_name ("clone");
4636
4637 ret = my_waitpid (child_pid, &status, 0);
4638 if (ret == -1)
4639 perror_with_name ("waitpid");
4640 else if (ret != child_pid)
4641 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4642 if (! WIFSTOPPED (status))
4643 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4644
4645 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4646 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4647 if (ret != 0)
4648 {
4649 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4650 if (ret != 0)
4651 {
4652 warning ("linux_test_for_tracefork: failed to kill child");
4653 return;
4654 }
4655
4656 ret = my_waitpid (child_pid, &status, 0);
4657 if (ret != child_pid)
4658 warning ("linux_test_for_tracefork: failed to wait for killed child");
4659 else if (!WIFSIGNALED (status))
4660 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4661 "killed child", status);
4662
4663 return;
4664 }
4665
4666 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4667 if (ret != 0)
4668 warning ("linux_test_for_tracefork: failed to resume child");
4669
4670 ret = my_waitpid (child_pid, &status, 0);
4671
4672 if (ret == child_pid && WIFSTOPPED (status)
4673 && status >> 16 == PTRACE_EVENT_FORK)
4674 {
4675 second_pid = 0;
4676 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4677 if (ret == 0 && second_pid != 0)
4678 {
4679 int second_status;
4680
4681 linux_supports_tracefork_flag = 1;
4682 my_waitpid (second_pid, &second_status, 0);
4683 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4684 if (ret != 0)
4685 warning ("linux_test_for_tracefork: failed to kill second child");
4686 my_waitpid (second_pid, &status, 0);
4687 }
4688 }
4689 else
4690 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4691 "(%d, status 0x%x)", ret, status);
4692
4693 do
4694 {
4695 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4696 if (ret != 0)
4697 warning ("linux_test_for_tracefork: failed to kill child");
4698 my_waitpid (child_pid, &status, 0);
4699 }
4700 while (WIFSTOPPED (status));
4701
4702 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4703 free (stack);
4704 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4705 }
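
/* The "status >> 16 == PTRACE_EVENT_FORK" test above works because
   extended ptrace events arrive as a SIGTRAP stop with the event
   number in bits 16-23 of the wait status.  A sketch of that decoding
   (example_ptrace_event is a hypothetical helper, kept out of the
   build):  */
#if 0
#include <signal.h>
#include <sys/wait.h>

/* Return the extended ptrace event carried by STATUS, or 0 if this is
   not an event stop (0 is also what a plain SIGTRAP yields).  */
static int
example_ptrace_event (int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    return status >> 16;
  return 0;
}
#endif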
4706
4707
4708 static void
4709 linux_look_up_symbols (void)
4710 {
4711 #ifdef USE_THREAD_DB
4712 struct process_info *proc = current_process ();
4713
4714 if (proc->private->thread_db != NULL)
4715 return;
4716
4717 /* If the kernel supports tracing forks then it also supports tracing
4718 clones, and then we don't need to use the magic thread event breakpoint
4719 to learn about threads. */
4720 thread_db_init (!linux_supports_tracefork_flag);
4721 #endif
4722 }
4723
4724 static void
4725 linux_request_interrupt (void)
4726 {
4727 extern unsigned long signal_pid;
4728
4729 if (!ptid_equal (cont_thread, null_ptid)
4730 && !ptid_equal (cont_thread, minus_one_ptid))
4731 {
4732 struct lwp_info *lwp;
4733 int lwpid;
4734
4735 lwp = get_thread_lwp (current_inferior);
4736 lwpid = lwpid_of (lwp);
4737 kill_lwp (lwpid, SIGINT);
4738 }
4739 else
4740 kill_lwp (signal_pid, SIGINT);
4741 }
4742
4743 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4744 to debugger memory starting at MYADDR. */
4745
4746 static int
4747 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4748 {
4749 char filename[PATH_MAX];
4750 int fd, n;
4751 int pid = lwpid_of (get_thread_lwp (current_inferior));
4752
4753 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4754
4755 fd = open (filename, O_RDONLY);
4756 if (fd < 0)
4757 return -1;
4758
4759 if (offset != (CORE_ADDR) 0
4760 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4761 n = -1;
4762 else
4763 n = read (fd, myaddr, len);
4764
4765 close (fd);
4766
4767 return n;
4768 }
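
/* The vector exposed above is an array of (a_type, a_val) pairs ending
   with an AT_NULL entry; get_phdr_phnum_from_proc_auxv later in this
   file scans it the same way for AT_PHDR/AT_PHNUM.  A 64-bit-only
   sketch that looks up AT_ENTRY (example_auxv_entry is an illustrative
   name, kept out of the build):  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static unsigned long
example_auxv_entry (pid_t pid)
{
  char filename[64];
  Elf64_auxv_t aux;
  int fd;
  unsigned long entry = 0;

  snprintf (filename, sizeof filename, "/proc/%d/auxv", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;

  while (read (fd, &aux, sizeof aux) == sizeof aux
	 && aux.a_type != AT_NULL)
    if (aux.a_type == AT_ENTRY)
      {
	entry = aux.a_un.a_val;
	break;
      }

  close (fd);
  return entry;
}
#endif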
4769
4770 /* These breakpoint- and watchpoint-related wrapper functions simply
4771 pass on the function call if the target has registered a
4772 corresponding function. */
4773
4774 static int
4775 linux_insert_point (char type, CORE_ADDR addr, int len)
4776 {
4777 if (the_low_target.insert_point != NULL)
4778 return the_low_target.insert_point (type, addr, len);
4779 else
4780 /* Unsupported (see target.h). */
4781 return 1;
4782 }
4783
4784 static int
4785 linux_remove_point (char type, CORE_ADDR addr, int len)
4786 {
4787 if (the_low_target.remove_point != NULL)
4788 return the_low_target.remove_point (type, addr, len);
4789 else
4790 /* Unsupported (see target.h). */
4791 return 1;
4792 }
4793
4794 static int
4795 linux_stopped_by_watchpoint (void)
4796 {
4797 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4798
4799 return lwp->stopped_by_watchpoint;
4800 }
4801
4802 static CORE_ADDR
4803 linux_stopped_data_address (void)
4804 {
4805 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4806
4807 return lwp->stopped_data_address;
4808 }
4809
4810 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4811 #if ! (defined(PT_TEXT_ADDR) \
4812 || defined(PT_DATA_ADDR) \
4813 || defined(PT_TEXT_END_ADDR))
4814 #if defined(__mcoldfire__)
4815 /* These should really be defined in the kernel's ptrace.h header. */
4816 #define PT_TEXT_ADDR 49*4
4817 #define PT_DATA_ADDR 50*4
4818 #define PT_TEXT_END_ADDR 51*4
4819 #elif defined(BFIN)
4820 #define PT_TEXT_ADDR 220
4821 #define PT_TEXT_END_ADDR 224
4822 #define PT_DATA_ADDR 228
4823 #elif defined(__TMS320C6X__)
4824 #define PT_TEXT_ADDR (0x10000*4)
4825 #define PT_DATA_ADDR (0x10004*4)
4826 #define PT_TEXT_END_ADDR (0x10008*4)
4827 #endif
4828 #endif
4829
4830 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4831 to tell gdb about. */
4832
4833 static int
4834 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4835 {
4836 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4837 unsigned long text, text_end, data;
4838 int pid = lwpid_of (get_thread_lwp (current_inferior));
4839
4840 errno = 0;
4841
4842 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4843 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4844 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4845
4846 if (errno == 0)
4847 {
4848 /* Both text and data offsets produced at compile-time (and so
4849 used by gdb) are relative to the beginning of the program,
4850 with the data segment immediately following the text segment.
4851 However, the actual runtime layout in memory may put the data
4852 somewhere else, so when we send gdb a data base-address, we
4853 use the real data base address and subtract the compile-time
4854 data base-address from it (which is just the length of the
4855 text segment). BSS immediately follows data in both
4856 cases. */
4857 *text_p = text;
4858 *data_p = data - (text_end - text);
4859
4860 return 1;
4861 }
4862 #endif
4863 return 0;
4864 }
4865 #endif
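
/* The offset arithmetic in linux_read_offsets, with concrete numbers
   (a checkable sketch, kept out of the build): if the text segment
   runs 0x8000..0x9000 at run time and the data segment was placed at
   0x20000, the data base address reported to gdb is
   0x20000 - (0x9000 - 0x8000) = 0x1f000.  */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned long text = 0x8000, text_end = 0x9000, data = 0x20000;

  assert (data - (text_end - text) == 0x1f000);
  return 0;
}
#endif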
4866
4867 static int
4868 linux_qxfer_osdata (const char *annex,
4869 unsigned char *readbuf, unsigned const char *writebuf,
4870 CORE_ADDR offset, int len)
4871 {
4872 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4873 }
4874
4875 /* Convert a native/host siginfo object, into/from the siginfo in the
4876 layout of the inferiors' architecture. */
4877
4878 static void
4879 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4880 {
4881 int done = 0;
4882
4883 if (the_low_target.siginfo_fixup != NULL)
4884 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4885
4886 /* If there was no callback, or the callback didn't do anything,
4887 then just do a straight memcpy. */
4888 if (!done)
4889 {
4890 if (direction == 1)
4891 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4892 else
4893 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4894 }
4895 }
4896
4897 static int
4898 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4899 unsigned const char *writebuf, CORE_ADDR offset, int len)
4900 {
4901 int pid;
4902 siginfo_t siginfo;
4903 char inf_siginfo[sizeof (siginfo_t)];
4904
4905 if (current_inferior == NULL)
4906 return -1;
4907
4908 pid = lwpid_of (get_thread_lwp (current_inferior));
4909
4910 if (debug_threads)
4911 fprintf (stderr, "%s siginfo for lwp %d.\n",
4912 readbuf != NULL ? "Reading" : "Writing",
4913 pid);
4914
4915 if (offset >= sizeof (siginfo))
4916 return -1;
4917
4918 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4919 return -1;
4920
4921 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4922 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4923 inferior with a 64-bit GDBSERVER should look the same as debugging it
4924 with a 32-bit GDBSERVER, we need to convert it. */
4925 siginfo_fixup (&siginfo, inf_siginfo, 0);
4926
4927 if (offset + len > sizeof (siginfo))
4928 len = sizeof (siginfo) - offset;
4929
4930 if (readbuf != NULL)
4931 memcpy (readbuf, inf_siginfo + offset, len);
4932 else
4933 {
4934 memcpy (inf_siginfo + offset, writebuf, len);
4935
4936 /* Convert back to ptrace layout before flushing it out. */
4937 siginfo_fixup (&siginfo, inf_siginfo, 1);
4938
4939 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4940 return -1;
4941 }
4942
4943 return len;
4944 }
4945
4946 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4947    it lets us notice when children change state; and it acts as the
4948    handler for the sigsuspend in my_waitpid.  */
4949
4950 static void
4951 sigchld_handler (int signo)
4952 {
4953 int old_errno = errno;
4954
4955 if (debug_threads)
4956 {
4957 do
4958 {
4959 /* fprintf is not async-signal-safe, so call write
4960 directly. */
4961 if (write (2, "sigchld_handler\n",
4962 sizeof ("sigchld_handler\n") - 1) < 0)
4963 break; /* just ignore */
4964 } while (0);
4965 }
4966
4967 if (target_is_async_p ())
4968 async_file_mark (); /* trigger a linux_wait */
4969
4970 errno = old_errno;
4971 }
4972
4973 static int
4974 linux_supports_non_stop (void)
4975 {
4976 return 1;
4977 }
4978
4979 static int
4980 linux_async (int enable)
4981 {
4982 int previous = (linux_event_pipe[0] != -1);
4983
4984 if (debug_threads)
4985 fprintf (stderr, "linux_async (%d), previous=%d\n",
4986 enable, previous);
4987
4988 if (previous != enable)
4989 {
4990 sigset_t mask;
4991 sigemptyset (&mask);
4992 sigaddset (&mask, SIGCHLD);
4993
4994 sigprocmask (SIG_BLOCK, &mask, NULL);
4995
4996 if (enable)
4997 {
4998 if (pipe (linux_event_pipe) == -1)
4999 fatal ("creating event pipe failed.");
5000
5001 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5002 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5003
5004 /* Register the event loop handler. */
5005 add_file_handler (linux_event_pipe[0],
5006 handle_target_event, NULL);
5007
5008 /* Always trigger a linux_wait. */
5009 async_file_mark ();
5010 }
5011 else
5012 {
5013 delete_file_handler (linux_event_pipe[0]);
5014
5015 close (linux_event_pipe[0]);
5016 close (linux_event_pipe[1]);
5017 linux_event_pipe[0] = -1;
5018 linux_event_pipe[1] = -1;
5019 }
5020
5021 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5022 }
5023
5024 return previous;
5025 }
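
/* linux_async wires SIGCHLD into the event loop with the classic
   self-pipe trick: the handler's only action is an async-signal-safe
   write to a non-blocking pipe whose read end the loop watches.  A
   condensed standalone sketch (the example_* names are illustrative,
   kept out of the build):  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int example_pipe[2];

static void
example_sigchld (int signo)
{
  int old_errno = errno;

  /* One pending byte is enough to wake the loop; a full pipe or any
     other write failure can be ignored.  */
  if (write (example_pipe[1], "+", 1) < 0)
    ;				/* Ignore.  */
  errno = old_errno;
}

static int
example_setup (void)
{
  struct sigaction sa;

  if (pipe (example_pipe) != 0)
    return -1;
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = example_sigchld;
  sa.sa_flags = SA_RESTART;
  return sigaction (SIGCHLD, &sa, NULL);
}
#endif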
5026
5027 static int
5028 linux_start_non_stop (int nonstop)
5029 {
5030 /* Register or unregister from event-loop accordingly. */
5031 linux_async (nonstop);
5032 return 0;
5033 }
5034
5035 static int
5036 linux_supports_multi_process (void)
5037 {
5038 return 1;
5039 }
5040
5041 static int
5042 linux_supports_disable_randomization (void)
5043 {
5044 #ifdef HAVE_PERSONALITY
5045 return 1;
5046 #else
5047 return 0;
5048 #endif
5049 }
5050
5051 static int
5052 linux_supports_agent (void)
5053 {
5054 return 1;
5055 }
5056
5057 /* Enumerate spufs IDs for process PID. */
5058 static int
5059 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5060 {
5061 int pos = 0;
5062 int written = 0;
5063 char path[128];
5064 DIR *dir;
5065 struct dirent *entry;
5066
5067 sprintf (path, "/proc/%ld/fd", pid);
5068 dir = opendir (path);
5069 if (!dir)
5070 return -1;
5071
5072 rewinddir (dir);
5073 while ((entry = readdir (dir)) != NULL)
5074 {
5075 struct stat st;
5076 struct statfs stfs;
5077 int fd;
5078
5079 fd = atoi (entry->d_name);
5080 if (!fd)
5081 continue;
5082
5083 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5084 if (stat (path, &st) != 0)
5085 continue;
5086 if (!S_ISDIR (st.st_mode))
5087 continue;
5088
5089 if (statfs (path, &stfs) != 0)
5090 continue;
5091 if (stfs.f_type != SPUFS_MAGIC)
5092 continue;
5093
5094 if (pos >= offset && pos + 4 <= offset + len)
5095 {
5096 *(unsigned int *)(buf + pos - offset) = fd;
5097 written += 4;
5098 }
5099 pos += 4;
5100 }
5101
5102 closedir (dir);
5103 return written;
5104 }
5105
5106 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5107 object type, using the /proc file system. */
5108 static int
5109 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5110 unsigned const char *writebuf,
5111 CORE_ADDR offset, int len)
5112 {
5113 long pid = lwpid_of (get_thread_lwp (current_inferior));
5114 char buf[128];
5115 int fd = 0;
5116 int ret = 0;
5117
5118 if (!writebuf && !readbuf)
5119 return -1;
5120
5121 if (!*annex)
5122 {
5123 if (!readbuf)
5124 return -1;
5125 else
5126 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5127 }
5128
5129 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5130   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5131 if (fd <= 0)
5132 return -1;
5133
5134 if (offset != 0
5135 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5136 {
5137 close (fd);
5138 return 0;
5139 }
5140
5141 if (writebuf)
5142 ret = write (fd, writebuf, (size_t) len);
5143 else
5144 ret = read (fd, readbuf, (size_t) len);
5145
5146 close (fd);
5147 return ret;
5148 }
5149
5150 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5151 struct target_loadseg
5152 {
5153 /* Core address to which the segment is mapped. */
5154 Elf32_Addr addr;
5155 /* VMA recorded in the program header. */
5156 Elf32_Addr p_vaddr;
5157 /* Size of this segment in memory. */
5158 Elf32_Word p_memsz;
5159 };
5160
5161 # if defined PT_GETDSBT
5162 struct target_loadmap
5163 {
5164 /* Protocol version number, must be zero. */
5165 Elf32_Word version;
5166 /* Pointer to the DSBT table, its size, and the DSBT index. */
5167 unsigned *dsbt_table;
5168 unsigned dsbt_size, dsbt_index;
5169 /* Number of segments in this map. */
5170 Elf32_Word nsegs;
5171 /* The actual memory map. */
5172 struct target_loadseg segs[/*nsegs*/];
5173 };
5174 # define LINUX_LOADMAP PT_GETDSBT
5175 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5176 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5177 # else
5178 struct target_loadmap
5179 {
5180 /* Protocol version number, must be zero. */
5181 Elf32_Half version;
5182 /* Number of segments in this map. */
5183 Elf32_Half nsegs;
5184 /* The actual memory map. */
5185 struct target_loadseg segs[/*nsegs*/];
5186 };
5187 # define LINUX_LOADMAP PTRACE_GETFDPIC
5188 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5189 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5190 # endif
5191
5192 static int
5193 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5194 unsigned char *myaddr, unsigned int len)
5195 {
5196 int pid = lwpid_of (get_thread_lwp (current_inferior));
5197 int addr = -1;
5198 struct target_loadmap *data = NULL;
5199 unsigned int actual_length, copy_length;
5200
5201 if (strcmp (annex, "exec") == 0)
5202 addr = (int) LINUX_LOADMAP_EXEC;
5203 else if (strcmp (annex, "interp") == 0)
5204 addr = (int) LINUX_LOADMAP_INTERP;
5205 else
5206 return -1;
5207
5208 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5209 return -1;
5210
5211 if (data == NULL)
5212 return -1;
5213
5214 actual_length = sizeof (struct target_loadmap)
5215 + sizeof (struct target_loadseg) * data->nsegs;
5216
5217 if (offset < 0 || offset > actual_length)
5218 return -1;
5219
5220 copy_length = actual_length - offset < len ? actual_length - offset : len;
5221 memcpy (myaddr, (char *) data + offset, copy_length);
5222 return copy_length;
5223 }
5224 #else
5225 # define linux_read_loadmap NULL
5226 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5227
5228 static void
5229 linux_process_qsupported (const char *query)
5230 {
5231 if (the_low_target.process_qsupported != NULL)
5232 the_low_target.process_qsupported (query);
5233 }
5234
5235 static int
5236 linux_supports_tracepoints (void)
5237 {
5238   if (the_low_target.supports_tracepoints == NULL)
5239 return 0;
5240
5241 return (*the_low_target.supports_tracepoints) ();
5242 }
5243
5244 static CORE_ADDR
5245 linux_read_pc (struct regcache *regcache)
5246 {
5247 if (the_low_target.get_pc == NULL)
5248 return 0;
5249
5250 return (*the_low_target.get_pc) (regcache);
5251 }
5252
5253 static void
5254 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5255 {
5256 gdb_assert (the_low_target.set_pc != NULL);
5257
5258 (*the_low_target.set_pc) (regcache, pc);
5259 }
5260
5261 static int
5262 linux_thread_stopped (struct thread_info *thread)
5263 {
5264 return get_thread_lwp (thread)->stopped;
5265 }
5266
5267 /* This exposes stop-all-threads functionality to other modules. */
5268
5269 static void
5270 linux_pause_all (int freeze)
5271 {
5272 stop_all_lwps (freeze, NULL);
5273 }
5274
5275 /* This exposes unstop-all-threads functionality to other gdbserver
5276 modules. */
5277
5278 static void
5279 linux_unpause_all (int unfreeze)
5280 {
5281 unstop_all_lwps (unfreeze, NULL);
5282 }
5283
5284 static int
5285 linux_prepare_to_access_memory (void)
5286 {
5287 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5288 running LWP. */
5289 if (non_stop)
5290 linux_pause_all (1);
5291 return 0;
5292 }
5293
5294 static void
5295 linux_done_accessing_memory (void)
5296 {
5297 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5298 running LWP. */
5299 if (non_stop)
5300 linux_unpause_all (1);
5301 }
5302
5303 static int
5304 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5305 CORE_ADDR collector,
5306 CORE_ADDR lockaddr,
5307 ULONGEST orig_size,
5308 CORE_ADDR *jump_entry,
5309 CORE_ADDR *trampoline,
5310 ULONGEST *trampoline_size,
5311 unsigned char *jjump_pad_insn,
5312 ULONGEST *jjump_pad_insn_size,
5313 CORE_ADDR *adjusted_insn_addr,
5314 CORE_ADDR *adjusted_insn_addr_end,
5315 char *err)
5316 {
5317 return (*the_low_target.install_fast_tracepoint_jump_pad)
5318 (tpoint, tpaddr, collector, lockaddr, orig_size,
5319 jump_entry, trampoline, trampoline_size,
5320 jjump_pad_insn, jjump_pad_insn_size,
5321 adjusted_insn_addr, adjusted_insn_addr_end,
5322 err);
5323 }
5324
5325 static struct emit_ops *
5326 linux_emit_ops (void)
5327 {
5328 if (the_low_target.emit_ops != NULL)
5329 return (*the_low_target.emit_ops) ();
5330 else
5331 return NULL;
5332 }
5333
5334 static int
5335 linux_get_min_fast_tracepoint_insn_len (void)
5336 {
5337 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5338 }
5339
5340 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5341
5342 static int
5343 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5344 CORE_ADDR *phdr_memaddr, int *num_phdr)
5345 {
5346 char filename[PATH_MAX];
5347 int fd;
5348 const int auxv_size = is_elf64
5349 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5350 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5351
5352 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5353
5354 fd = open (filename, O_RDONLY);
5355 if (fd < 0)
5356 return 1;
5357
5358 *phdr_memaddr = 0;
5359 *num_phdr = 0;
5360 while (read (fd, buf, auxv_size) == auxv_size
5361 && (*phdr_memaddr == 0 || *num_phdr == 0))
5362 {
5363 if (is_elf64)
5364 {
5365 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5366
5367 switch (aux->a_type)
5368 {
5369 case AT_PHDR:
5370 *phdr_memaddr = aux->a_un.a_val;
5371 break;
5372 case AT_PHNUM:
5373 *num_phdr = aux->a_un.a_val;
5374 break;
5375 }
5376 }
5377 else
5378 {
5379 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5380
5381 switch (aux->a_type)
5382 {
5383 case AT_PHDR:
5384 *phdr_memaddr = aux->a_un.a_val;
5385 break;
5386 case AT_PHNUM:
5387 *num_phdr = aux->a_un.a_val;
5388 break;
5389 }
5390 }
5391 }
5392
5393 close (fd);
5394
5395 if (*phdr_memaddr == 0 || *num_phdr == 0)
5396 {
5397 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5398 "phdr_memaddr = %ld, phdr_num = %d",
5399 (long) *phdr_memaddr, *num_phdr);
5400 return 2;
5401 }
5402
5403 return 0;
5404 }
5405
5406 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5407
5408 static CORE_ADDR
5409 get_dynamic (const int pid, const int is_elf64)
5410 {
5411 CORE_ADDR phdr_memaddr, relocation;
5412 int num_phdr, i;
5413 unsigned char *phdr_buf;
5414 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5415
5416 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5417 return 0;
5418
5419 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5420 phdr_buf = alloca (num_phdr * phdr_size);
5421
5422 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5423 return 0;
5424
5425 /* Compute relocation: it is expected to be 0 for "regular" executables,
5426 non-zero for PIE ones. */
5427 relocation = -1;
5428 for (i = 0; relocation == -1 && i < num_phdr; i++)
5429 if (is_elf64)
5430 {
5431 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5432
5433 if (p->p_type == PT_PHDR)
5434 relocation = phdr_memaddr - p->p_vaddr;
5435 }
5436 else
5437 {
5438 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5439
5440 if (p->p_type == PT_PHDR)
5441 relocation = phdr_memaddr - p->p_vaddr;
5442 }
5443
5444 if (relocation == -1)
5445 {
5446       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
5447 	 all real-world executables, including PIE executables, always have
5448 	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
5449 	 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5450 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
5451
5452 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5453
5454 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5455
5456 return 0;
5457 }
5458
5459 for (i = 0; i < num_phdr; i++)
5460 {
5461 if (is_elf64)
5462 {
5463 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5464
5465 if (p->p_type == PT_DYNAMIC)
5466 return p->p_vaddr + relocation;
5467 }
5468 else
5469 {
5470 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5471
5472 if (p->p_type == PT_DYNAMIC)
5473 return p->p_vaddr + relocation;
5474 }
5475 }
5476
5477 return 0;
5478 }
5479
5480 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5481 can be 0 if the inferior does not yet have the library list initialized.
5482 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5483 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5484
5485 static CORE_ADDR
5486 get_r_debug (const int pid, const int is_elf64)
5487 {
5488 CORE_ADDR dynamic_memaddr;
5489 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5490 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5491 CORE_ADDR map = -1;
5492
5493 dynamic_memaddr = get_dynamic (pid, is_elf64);
5494 if (dynamic_memaddr == 0)
5495 return map;
5496
5497 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5498 {
5499 if (is_elf64)
5500 {
5501 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5502 #ifdef DT_MIPS_RLD_MAP
5503 union
5504 {
5505 Elf64_Xword map;
5506 unsigned char buf[sizeof (Elf64_Xword)];
5507 }
5508 rld_map;
5509
5510 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5511 {
5512 if (linux_read_memory (dyn->d_un.d_val,
5513 rld_map.buf, sizeof (rld_map.buf)) == 0)
5514 return rld_map.map;
5515 else
5516 break;
5517 }
5518 #endif /* DT_MIPS_RLD_MAP */
5519
5520 if (dyn->d_tag == DT_DEBUG && map == -1)
5521 map = dyn->d_un.d_val;
5522
5523 if (dyn->d_tag == DT_NULL)
5524 break;
5525 }
5526 else
5527 {
5528 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5529 #ifdef DT_MIPS_RLD_MAP
5530 union
5531 {
5532 Elf32_Word map;
5533 unsigned char buf[sizeof (Elf32_Word)];
5534 }
5535 rld_map;
5536
5537 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5538 {
5539 if (linux_read_memory (dyn->d_un.d_val,
5540 rld_map.buf, sizeof (rld_map.buf)) == 0)
5541 return rld_map.map;
5542 else
5543 break;
5544 }
5545 #endif /* DT_MIPS_RLD_MAP */
5546
5547 if (dyn->d_tag == DT_DEBUG && map == -1)
5548 map = dyn->d_un.d_val;
5549
5550 if (dyn->d_tag == DT_NULL)
5551 break;
5552 }
5553
5554 dynamic_memaddr += dyn_size;
5555 }
5556
5557 return map;
5558 }
5559
5560 /* Read one pointer from MEMADDR in the inferior. */
5561
5562 static int
5563 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5564 {
5565 int ret;
5566
5567 /* Go through a union so this works on either big or little endian
5568 hosts, when the inferior's pointer size is smaller than the size
5569 of CORE_ADDR. It is assumed the inferior's endianness is the
5570      same as the superior's.  */
5571 union
5572 {
5573 CORE_ADDR core_addr;
5574 unsigned int ui;
5575 unsigned char uc;
5576 } addr;
5577
5578 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5579 if (ret == 0)
5580 {
5581 if (ptr_size == sizeof (CORE_ADDR))
5582 *ptr = addr.core_addr;
5583 else if (ptr_size == sizeof (unsigned int))
5584 *ptr = addr.ui;
5585 else
5586 gdb_assert_not_reached ("unhandled pointer size");
5587 }
5588 return ret;
5589 }
5590
5591 struct link_map_offsets
5592 {
5593 /* Offset and size of r_debug.r_version. */
5594 int r_version_offset;
5595
5596 /* Offset and size of r_debug.r_map. */
5597 int r_map_offset;
5598
5599 /* Offset to l_addr field in struct link_map. */
5600 int l_addr_offset;
5601
5602 /* Offset to l_name field in struct link_map. */
5603 int l_name_offset;
5604
5605 /* Offset to l_ld field in struct link_map. */
5606 int l_ld_offset;
5607
5608 /* Offset to l_next field in struct link_map. */
5609 int l_next_offset;
5610
5611 /* Offset to l_prev field in struct link_map. */
5612 int l_prev_offset;
5613 };
5614
5615 /* Construct qXfer:libraries-svr4:read reply. */
5616
5617 static int
5618 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5619 unsigned const char *writebuf,
5620 CORE_ADDR offset, int len)
5621 {
5622 char *document;
5623 unsigned document_len;
5624 struct process_info_private *const priv = current_process ()->private;
5625 char filename[PATH_MAX];
5626 int pid, is_elf64;
5627
5628 static const struct link_map_offsets lmo_32bit_offsets =
5629 {
5630 0, /* r_version offset. */
5631 4, /* r_debug.r_map offset. */
5632 0, /* l_addr offset in link_map. */
5633 4, /* l_name offset in link_map. */
5634 8, /* l_ld offset in link_map. */
5635 12, /* l_next offset in link_map. */
5636 16 /* l_prev offset in link_map. */
5637 };
5638
5639 static const struct link_map_offsets lmo_64bit_offsets =
5640 {
5641 0, /* r_version offset. */
5642 8, /* r_debug.r_map offset. */
5643 0, /* l_addr offset in link_map. */
5644 8, /* l_name offset in link_map. */
5645 16, /* l_ld offset in link_map. */
5646 24, /* l_next offset in link_map. */
5647 32 /* l_prev offset in link_map. */
5648 };
5649 const struct link_map_offsets *lmo;
5650 unsigned int machine;
5651
5652 if (writebuf != NULL)
5653 return -2;
5654 if (readbuf == NULL)
5655 return -1;
5656
5657 pid = lwpid_of (get_thread_lwp (current_inferior));
5658 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5659 is_elf64 = elf_64_file_p (filename, &machine);
5660 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5661
5662 if (priv->r_debug == 0)
5663 priv->r_debug = get_r_debug (pid, is_elf64);
5664
5665   /* We failed to find DT_DEBUG.  This situation will not change for this
5666      inferior - do not retry it.  Report it to GDB as E01; see GDB's
5667      solib-svr4.c for the reasons.  */
5668 if (priv->r_debug == (CORE_ADDR) -1)
5669 return -1;
5670
5671 if (priv->r_debug == 0)
5672 {
5673 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5674 }
5675 else
5676 {
5677 int allocated = 1024;
5678 char *p;
5679 const int ptr_size = is_elf64 ? 8 : 4;
5680 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5681 int r_version, header_done = 0;
5682
5683 document = xmalloc (allocated);
5684 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5685 p = document + strlen (document);
5686
5687 r_version = 0;
5688 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5689 (unsigned char *) &r_version,
5690 sizeof (r_version)) != 0
5691 || r_version != 1)
5692 {
5693 warning ("unexpected r_debug version %d", r_version);
5694 goto done;
5695 }
5696
5697 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5698 &lm_addr, ptr_size) != 0)
5699 {
5700 warning ("unable to read r_map from 0x%lx",
5701 (long) priv->r_debug + lmo->r_map_offset);
5702 goto done;
5703 }
5704
5705 lm_prev = 0;
5706 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5707 &l_name, ptr_size) == 0
5708 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5709 &l_addr, ptr_size) == 0
5710 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5711 &l_ld, ptr_size) == 0
5712 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5713 &l_prev, ptr_size) == 0
5714 && read_one_ptr (lm_addr + lmo->l_next_offset,
5715 &l_next, ptr_size) == 0)
5716 {
5717 unsigned char libname[PATH_MAX];
5718
5719 if (lm_prev != l_prev)
5720 {
5721 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5722 (long) lm_prev, (long) l_prev);
5723 break;
5724 }
5725
5726 /* Not checking for error because reading may stop before
5727 we've got PATH_MAX worth of characters. */
5728 libname[0] = '\0';
5729 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5730 libname[sizeof (libname) - 1] = '\0';
5731 if (libname[0] != '\0')
5732 {
5733 /* 6x the size for xml_escape_text below. */
5734 size_t len = 6 * strlen ((char *) libname);
5735 char *name;
5736
5737 if (!header_done)
5738 {
5739 /* Terminate `<library-list-svr4'. */
5740 *p++ = '>';
5741 header_done = 1;
5742 }
5743
5744 while (allocated < p - document + len + 200)
5745 {
5746 /* Expand to guarantee sufficient storage. */
5747 uintptr_t document_len = p - document;
5748
5749 document = xrealloc (document, 2 * allocated);
5750 allocated *= 2;
5751 p = document + document_len;
5752 }
5753
5754 name = xml_escape_text ((char *) libname);
5755 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5756 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5757 name, (unsigned long) lm_addr,
5758 (unsigned long) l_addr, (unsigned long) l_ld);
5759 free (name);
5760 }
5761 else if (lm_prev == 0)
5762 {
5763 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5764 p = p + strlen (p);
5765 }
5766
5767 if (l_next == 0)
5768 break;
5769
5770 lm_prev = lm_addr;
5771 lm_addr = l_next;
5772 }
5773 done:
5774 if (!header_done)
5775 {
5776 /* Empty list; terminate `<library-list-svr4'. */
5777 strcpy (p, "/>");
5778 }
5779 else
5780 strcpy (p, "</library-list-svr4>");
5781 }
5782
5783 document_len = strlen (document);
5784 if (offset < document_len)
5785 document_len -= offset;
5786 else
5787 document_len = 0;
5788 if (len > document_len)
5789 len = document_len;
5790
5791 memcpy (readbuf, document + offset, len);
5792 xfree (document);
5793
5794 return len;
5795 }
5796
5797 static struct target_ops linux_target_ops = {
5798 linux_create_inferior,
5799 linux_attach,
5800 linux_kill,
5801 linux_detach,
5802 linux_mourn,
5803 linux_join,
5804 linux_thread_alive,
5805 linux_resume,
5806 linux_wait,
5807 linux_fetch_registers,
5808 linux_store_registers,
5809 linux_prepare_to_access_memory,
5810 linux_done_accessing_memory,
5811 linux_read_memory,
5812 linux_write_memory,
5813 linux_look_up_symbols,
5814 linux_request_interrupt,
5815 linux_read_auxv,
5816 linux_insert_point,
5817 linux_remove_point,
5818 linux_stopped_by_watchpoint,
5819 linux_stopped_data_address,
5820 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5821 linux_read_offsets,
5822 #else
5823 NULL,
5824 #endif
5825 #ifdef USE_THREAD_DB
5826 thread_db_get_tls_address,
5827 #else
5828 NULL,
5829 #endif
5830 linux_qxfer_spu,
5831 hostio_last_error_from_errno,
5832 linux_qxfer_osdata,
5833 linux_xfer_siginfo,
5834 linux_supports_non_stop,
5835 linux_async,
5836 linux_start_non_stop,
5837 linux_supports_multi_process,
5838 #ifdef USE_THREAD_DB
5839 thread_db_handle_monitor_command,
5840 #else
5841 NULL,
5842 #endif
5843 linux_common_core_of_thread,
5844 linux_read_loadmap,
5845 linux_process_qsupported,
5846 linux_supports_tracepoints,
5847 linux_read_pc,
5848 linux_write_pc,
5849 linux_thread_stopped,
5850 NULL,
5851 linux_pause_all,
5852 linux_unpause_all,
5853 linux_cancel_breakpoints,
5854 linux_stabilize_threads,
5855 linux_install_fast_tracepoint_jump_pad,
5856 linux_emit_ops,
5857 linux_supports_disable_randomization,
5858 linux_get_min_fast_tracepoint_insn_len,
5859 linux_qxfer_libraries_svr4,
5860 linux_supports_agent,
5861 };
5862
5863 static void
5864 linux_init_signals (void)
5865 {
5866 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5867 to find what the cancel signal actually is. */
5868 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5869 signal (__SIGRTMIN+1, SIG_IGN);
5870 #endif
5871 }
5872
5873 void
5874 initialize_low (void)
5875 {
5876 struct sigaction sigchld_action;
5877 memset (&sigchld_action, 0, sizeof (sigchld_action));
5878 set_target_ops (&linux_target_ops);
5879 set_breakpoint_data (the_low_target.breakpoint,
5880 the_low_target.breakpoint_len);
5881 linux_init_signals ();
5882 linux_test_for_tracefork ();
5883 linux_ptrace_init_warnings ();
5884 #ifdef HAVE_LINUX_REGSETS
5885 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5886 ;
5887 disabled_regsets = xmalloc (num_regsets);
5888 #endif
5889
5890 sigchld_action.sa_handler = sigchld_handler;
5891 sigemptyset (&sigchld_action.sa_mask);
5892 sigchld_action.sa_flags = SA_RESTART;
5893 sigaction (SIGCHLD, &sigchld_action, NULL);
5894 }