/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include "gdb_wait.h"
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "gdb_stat.h"
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

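/* Fallback definition.  W_STOPCODE builds a wait status that
   WIFSTOPPED recognizes and from which WSTOPSIG extracts SIG; for
   example, W_STOPCODE (SIGTRAP) yields 0x57f.  It is used below when
   re-reporting deferred signals.  */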
#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
/* PTRACE_TEXT_ADDR and friends.  */
#include <asm/ptrace.h>
#define HAS_NOMMU
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF file at all.  On
   success, also store the machine type in *MACHINE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

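/* Remove LWP from the thread and LWP lists and free its storage,
   including any architecture-specific private data.  */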
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_ARG3_TYPE) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads != NOT_STOPPING_THREADS)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads != NOT_STOPPING_THREADS)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

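/* Allocate a new lwp_info for PTID, let the low target attach its
   per-thread data, and link it into the LWP list.  The caller is
   expected to register the corresponding thread with add_thread.  */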
static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0)
      != 0)
    {
      struct buffer buffer;

      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
	     lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	fprintf (stderr,
		 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  unsigned long lwp;
	  int new_threads_found;
	  int iterations = 0;
	  struct dirent *dp;

	  while (iterations < 2)
	    {
	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  /* Is this a new thread?  */
		  if (lwp
		      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
		    {
		      linux_attach_lwp_1 (lwp, 0);
		      new_threads_found++;

		      if (debug_threads)
			fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

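/* Callback for find_inferior.  Increment the count in *ARGS for each
   thread belonging to ARGS's pid; stop the iteration as soon as a
   second thread is seen.  */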
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

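/* Return non-zero if THREAD is the only remaining thread of its
   process.  */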
static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

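/* Kill process PID and all of its LWPs, then mourn it.  Return 0 on
   success, -1 if no such process is known.  */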
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      /* LWP is NULL here, so only the pid can be reported.  */
      if (debug_threads)
	fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
		 pid);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
		 lwpid_of (lwp), pid);

      do
	{
	  linux_kill_one_lwp (lwp);

	  /* Make sure it died.  The loop is most likely unnecessary.  */
	  lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
	} while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s hasn't stopped: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had stopped with extended "
		 "status: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, but it is in nopass state\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, "
		 "but we don't know if we should pass it.  Default to not.\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s has pending signal %s: delivering it.\n",
		 target_pid_to_str (ptid_of (lp)),
		 gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

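/* Callback for find_inferior.  Detach from one LWP of process PID,
   first discarding any pending SIGSTOP and passing on any other
   pending signal so that it is not lost.  */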
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Sending SIGCONT to %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
	      (PTRACE_ARG4_TYPE) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (lwp)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

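/* Detach from process PID and all of its LWPs.  Return 0 on success,
   -1 if no such process is known.  */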
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

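/* Forget everything we know about PROCESS: delete its LWPs, free its
   private data, and remove it from the process list.  */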
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

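/* Wait for process PID to exit, reaping it with waitpid until it is
   gone or until we are no longer its parent (ECHILD).  */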
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

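/* Callback for find_inferior.  Return 1 if ENTRY's LWP ID matches the
   LWP ID encoded in *DATA (falling back to the pid when the ptid has
   no lwp field).  */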
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

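/* Find the lwp_info whose LWP ID matches PTID, or return NULL if
   there is none.  */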
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}
static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
			 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
			     (PTRACE_ARG3_TYPE) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
			  (PTRACE_ARG3_TYPE) 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	fprintf (stderr,
		 "   Already queued %d\n",
		 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Not requeuing already queued non-RT signal %d"
			 " for LWP %ld\n",
			 sig->signal,
			 lwpid_of (lwp));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
		 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    fprintf (stderr,
		     "   Still queued %d\n",
		     sig->signal);

	  fprintf (stderr, "   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: Push back breakpoint for %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_inferior, 1);
	  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
	}

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "CB: No breakpoint found at %s for [%s]\n",
		 paddress (lwp->stop_pc),
		 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

1817 /* When the event-loop is doing a step-over, this points at the thread
1818 being stepped. */
1819 ptid_t step_over_bkpt;
1820
1821 /* Wait for an event from child PID. If PID is -1, wait for any
1822 child. Store the stop status through the status pointer WSTAT.
1823 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1824 event was found and OPTIONS contains WNOHANG. Return the PID of
1825 the stopped child otherwise. */
1826
1827 static int
1828 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1829 {
1830 struct lwp_info *event_child, *requested_child;
1831 ptid_t wait_ptid;
1832
1833 event_child = NULL;
1834 requested_child = NULL;
1835
1836 /* Check for a lwp with a pending status. */
1837
1838 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1839 {
1840 event_child = (struct lwp_info *)
1841 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1842 if (debug_threads && event_child)
1843 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1844 }
1845 else
1846 {
1847 requested_child = find_lwp_pid (ptid);
1848
1849 if (stopping_threads == NOT_STOPPING_THREADS
1850 && requested_child->status_pending_p
1851 && requested_child->collecting_fast_tracepoint)
1852 {
1853 enqueue_one_deferred_signal (requested_child,
1854 &requested_child->status_pending);
1855 requested_child->status_pending_p = 0;
1856 requested_child->status_pending = 0;
1857 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1858 }
1859
1860 if (requested_child->suspended
1861 && requested_child->status_pending_p)
1862 fatal ("requesting an event out of a suspended child?");
1863
1864 if (requested_child->status_pending_p)
1865 event_child = requested_child;
1866 }
1867
1868 if (event_child != NULL)
1869 {
1870 if (debug_threads)
1871 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1872 lwpid_of (event_child), event_child->status_pending);
1873 *wstat = event_child->status_pending;
1874 event_child->status_pending_p = 0;
1875 event_child->status_pending = 0;
1876 current_inferior = get_lwp_thread (event_child);
1877 return lwpid_of (event_child);
1878 }
1879
1880 if (ptid_is_pid (ptid))
1881 {
1882 /* A request to wait for a specific tgid. This is not possible
1883 with waitpid, so instead, we wait for any child, and leave
1884 children we're not interested in right now with a pending
1885 status to report later. */
1886 wait_ptid = minus_one_ptid;
1887 }
1888 else
1889 wait_ptid = ptid;
1890
1891 /* We only enter this loop if no process has a pending wait status. Thus
1892 any action taken in response to a wait status inside this loop is
1893 responding as soon as we detect the status, not after any pending
1894 events. */
1895 while (1)
1896 {
1897 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1898
1899 if ((options & WNOHANG) && event_child == NULL)
1900 {
1901 if (debug_threads)
1902 fprintf (stderr, "WNOHANG set, no event found\n");
1903 return 0;
1904 }
1905
1906 if (event_child == NULL)
1907 error ("event from unknown child");
1908
1909 if (ptid_is_pid (ptid)
1910 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1911 {
1912 if (! WIFSTOPPED (*wstat))
1913 mark_lwp_dead (event_child, *wstat);
1914 else
1915 {
1916 event_child->status_pending_p = 1;
1917 event_child->status_pending = *wstat;
1918 }
1919 continue;
1920 }
1921
1922 current_inferior = get_lwp_thread (event_child);
1923
1924 /* Check for thread exit. */
1925 if (! WIFSTOPPED (*wstat))
1926 {
1927 if (debug_threads)
1928 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1929
1930 /* If the last thread is exiting, just return. */
1931 if (last_thread_of_process_p (current_inferior))
1932 {
1933 if (debug_threads)
1934 fprintf (stderr, "LWP %ld is last lwp of process\n",
1935 lwpid_of (event_child));
1936 return lwpid_of (event_child);
1937 }
1938
1939 if (!non_stop)
1940 {
1941 current_inferior = (struct thread_info *) all_threads.head;
1942 if (debug_threads)
1943 fprintf (stderr, "Current inferior is now %ld\n",
1944 lwpid_of (get_thread_lwp (current_inferior)));
1945 }
1946 else
1947 {
1948 current_inferior = NULL;
1949 if (debug_threads)
1950 fprintf (stderr, "Current inferior is now <NULL>\n");
1951 }
1952
1953 /* If we were waiting for this particular child to do something...
1954 well, it did something. */
1955 if (requested_child != NULL)
1956 {
1957 int lwpid = lwpid_of (event_child);
1958
1959 /* Cancel the step-over operation --- the thread that
1960 started it is gone. */
1961 if (finish_step_over (event_child))
1962 unstop_all_lwps (1, event_child);
1963 delete_lwp (event_child);
1964 return lwpid;
1965 }
1966
1967 delete_lwp (event_child);
1968
1969 /* Wait for a more interesting event. */
1970 continue;
1971 }
1972
1973 if (event_child->must_set_ptrace_flags)
1974 {
1975 linux_enable_event_reporting (lwpid_of (event_child));
1976 event_child->must_set_ptrace_flags = 0;
1977 }
1978
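/* Descriptive note: extended ptrace events (fork/vfork/clone/exec
   reports requested via the PTRACE_O_TRACE* options) arrive as a
   SIGTRAP stop with the PTRACE_EVENT_* code stored in bits 16 and
   up of the wait status; that is what the "*wstat >> 16" test below
   detects (see ptrace(2)).  */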
1979 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1980 && *wstat >> 16 != 0)
1981 {
1982 handle_extended_wait (event_child, *wstat);
1983 continue;
1984 }
1985
1986 if (WIFSTOPPED (*wstat)
1987 && WSTOPSIG (*wstat) == SIGSTOP
1988 && event_child->stop_expected)
1989 {
1990 int should_stop;
1991
1992 if (debug_threads)
1993 fprintf (stderr, "Expected stop.\n");
1994 event_child->stop_expected = 0;
1995
1996 should_stop = (current_inferior->last_resume_kind == resume_stop
1997 || stopping_threads != NOT_STOPPING_THREADS);
1998
1999 if (!should_stop)
2000 {
2001 linux_resume_one_lwp (event_child,
2002 event_child->stepping, 0, NULL);
2003 continue;
2004 }
2005 }
2006
2007 return lwpid_of (event_child);
2008 }
2009
2010 /* NOTREACHED */
2011 return 0;
2012 }
2013
2014 /* Count the LWPs that have had events. */
2015
2016 static int
2017 count_events_callback (struct inferior_list_entry *entry, void *data)
2018 {
2019 struct lwp_info *lp = (struct lwp_info *) entry;
2020 struct thread_info *thread = get_lwp_thread (lp);
2021 int *count = data;
2022
2023 gdb_assert (count != NULL);
2024
2025 /* Count only resumed LWPs that have a SIGTRAP event pending that
2026 should be reported to GDB. */
2027 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2028 && thread->last_resume_kind != resume_stop
2029 && lp->status_pending_p
2030 && WIFSTOPPED (lp->status_pending)
2031 && WSTOPSIG (lp->status_pending) == SIGTRAP
2032 && !breakpoint_inserted_here (lp->stop_pc))
2033 (*count)++;
2034
2035 return 0;
2036 }
2037
2038 /* Select the LWP (if any) that is currently being single-stepped. */
2039
2040 static int
2041 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2042 {
2043 struct lwp_info *lp = (struct lwp_info *) entry;
2044 struct thread_info *thread = get_lwp_thread (lp);
2045
2046 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2047 && thread->last_resume_kind == resume_step
2048 && lp->status_pending_p)
2049 return 1;
2050 else
2051 return 0;
2052 }
2053
2054 /* Select the Nth LWP that has had a SIGTRAP event that should be
2055 reported to GDB. */
2056
2057 static int
2058 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2059 {
2060 struct lwp_info *lp = (struct lwp_info *) entry;
2061 struct thread_info *thread = get_lwp_thread (lp);
2062 int *selector = data;
2063
2064 gdb_assert (selector != NULL);
2065
2066 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2067 if (thread->last_resume_kind != resume_stop
2068 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2069 && lp->status_pending_p
2070 && WIFSTOPPED (lp->status_pending)
2071 && WSTOPSIG (lp->status_pending) == SIGTRAP
2072 && !breakpoint_inserted_here (lp->stop_pc))
2073 if ((*selector)-- == 0)
2074 return 1;
2075
2076 return 0;
2077 }
2078
2079 static int
2080 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2081 {
2082 struct lwp_info *lp = (struct lwp_info *) entry;
2083 struct thread_info *thread = get_lwp_thread (lp);
2084 struct lwp_info *event_lp = data;
2085
2086 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2087 if (lp == event_lp)
2088 return 0;
2089
2090 /* If an LWP other than the LWP that we're reporting an event for has
2091 hit a GDB breakpoint (as opposed to some random trap signal),
2092 then just arrange for it to hit it again later. We don't keep
2093 the SIGTRAP status and don't forward the SIGTRAP signal to the
2094 LWP. We will handle the current event, eventually we will resume
2095 all LWPs, and this one will get its breakpoint trap again.
2096
2097 If we do not do this, then we run the risk that the user will
2098 delete or disable the breakpoint, but the LWP will have already
2099 tripped on it. */
2100
2101 if (thread->last_resume_kind != resume_stop
2102 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2103 && lp->status_pending_p
2104 && WIFSTOPPED (lp->status_pending)
2105 && WSTOPSIG (lp->status_pending) == SIGTRAP
2106 && !lp->stepping
2107 && !lp->stopped_by_watchpoint
2108 && cancel_breakpoint (lp))
2109 /* Throw away the SIGTRAP. */
2110 lp->status_pending_p = 0;
2111
2112 return 0;
2113 }
2114
2115 static void
2116 linux_cancel_breakpoints (void)
2117 {
2118 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2119 }
2120
2121 /* Select one LWP out of those that have events pending. */
2122
2123 static void
2124 select_event_lwp (struct lwp_info **orig_lp)
2125 {
2126 int num_events = 0;
2127 int random_selector;
2128 struct lwp_info *event_lp;
2129
2130 /* Give preference to any LWP that is being single-stepped. */
2131 event_lp
2132 = (struct lwp_info *) find_inferior (&all_lwps,
2133 select_singlestep_lwp_callback, NULL);
2134 if (event_lp != NULL)
2135 {
2136 if (debug_threads)
2137 fprintf (stderr,
2138 "SEL: Select single-step %s\n",
2139 target_pid_to_str (ptid_of (event_lp)));
2140 }
2141 else
2142 {
2143 /* No single-stepping LWP. Select one at random, out of those
2144 which have had SIGTRAP events. */
2145
2146 /* First see how many SIGTRAP events we have. */
2147 find_inferior (&all_lwps, count_events_callback, &num_events);
2148
2149 /* Now randomly pick an LWP out of those that have had a SIGTRAP. */
2150 random_selector = (int)
2151 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
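/* Worked example (illustrative arithmetic): with num_events == 3,
   rand () values map uniformly onto selectors 0, 1 and 2; e.g.
   rand () == RAND_MAX / 2 yields (int) (3 * 0.5) == 1.  Dividing by
   RAND_MAX + 1.0 rather than RAND_MAX keeps the result strictly
   below num_events.  */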
2152
2153 if (debug_threads && num_events > 1)
2154 fprintf (stderr,
2155 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2156 num_events, random_selector);
2157
2158 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2159 select_event_lwp_callback,
2160 &random_selector);
2161 }
2162
2163 if (event_lp != NULL)
2164 {
2165 /* Switch the event LWP. */
2166 *orig_lp = event_lp;
2167 }
2168 }
2169
2170 /* Decrement the suspend count of an LWP. */
2171
2172 static int
2173 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2174 {
2175 struct lwp_info *lwp = (struct lwp_info *) entry;
2176
2177 /* Ignore EXCEPT. */
2178 if (lwp == except)
2179 return 0;
2180
2181 lwp->suspended--;
2182
2183 gdb_assert (lwp->suspended >= 0);
2184 return 0;
2185 }
2186
2187 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2188 non-NULL. */
2189
2190 static void
2191 unsuspend_all_lwps (struct lwp_info *except)
2192 {
2193 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2194 }
2195
2196 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2197 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2198 void *data);
2199 static int lwp_running (struct inferior_list_entry *entry, void *data);
2200 static ptid_t linux_wait_1 (ptid_t ptid,
2201 struct target_waitstatus *ourstatus,
2202 int target_options);
2203
2204 /* Stabilize threads (move out of jump pads).
2205
2206 If a thread is midway collecting a fast tracepoint, we need to
2207 finish the collection and move it out of the jump pad before
2208 reporting the signal.
2209
2210 This avoids recursion while collecting (when a signal arrives
2211 midway, and the signal handler itself collects), which would trash
2212 the trace buffer. In case the user set a breakpoint in a signal
2213 handler, this avoids the backtrace showing the jump pad, etc.
2214 Most importantly, there are certain things we can't do safely if
2215 threads are stopped in a jump pad (or in its callees). For
2216 example:
2217 
2218 - starting a new trace run. A thread still collecting the
2219 previous run could trash the trace buffer when resumed. The trace
2220 buffer control structures would have been reset but the thread had
2221 no way to tell. The thread could even be midway through memcpy'ing
2222 to the buffer, which would mean that when resumed, it would clobber
2223 the trace buffer that had been set up for the new run.
2224 
2225 - we can't rewrite/reuse the jump pads for new tracepoints
2226 safely. Say you do tstart while a thread is stopped midway through
2227 collecting. When the thread is later resumed, it finishes the
2228 collection, and returns to the jump pad, to execute the original
2229 instruction that was under the tracepoint jump at the time the
2230 older run had been started. If the jump pad had since been
2231 rewritten for something else in the new run, the thread would now
2232 execute wrong or random instructions. */
2233
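/* In outline (a reading aid, not a change in behavior): bail out if
   some LWP is stuck in a jump pad with an event GDB must see
   (stuck_in_jump_pad_callback); otherwise kick every LWP out of its
   jump pad (move_out_of_jump_pad_callback) and pump linux_wait_1
   until lwp_running finds nothing still running, queueing any
   signals that arrive meanwhile as deferred signals.  */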
2234 static void
2235 linux_stabilize_threads (void)
2236 {
2237 struct thread_info *save_inferior;
2238 struct lwp_info *lwp_stuck;
2239
2240 lwp_stuck
2241 = (struct lwp_info *) find_inferior (&all_lwps,
2242 stuck_in_jump_pad_callback, NULL);
2243 if (lwp_stuck != NULL)
2244 {
2245 if (debug_threads)
2246 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2247 lwpid_of (lwp_stuck));
2248 return;
2249 }
2250
2251 save_inferior = current_inferior;
2252
2253 stabilizing_threads = 1;
2254
2255 /* Kick 'em all. */
2256 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2257
2258 /* Loop until all are stopped out of the jump pads. */
2259 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2260 {
2261 struct target_waitstatus ourstatus;
2262 struct lwp_info *lwp;
2263 int wstat;
2264
2265 /* Note that we go through the full wait event loop. While
2266 moving threads out of the jump pad, we need to be able to step
2267 over internal breakpoints and such. */
2268 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2269
2270 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2271 {
2272 lwp = get_thread_lwp (current_inferior);
2273
2274 /* Lock it. */
2275 lwp->suspended++;
2276
2277 if (ourstatus.value.sig != GDB_SIGNAL_0
2278 || current_inferior->last_resume_kind == resume_stop)
2279 {
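/* W_STOPCODE folds a host signal number back into a raw wait
   status, so the deferred value later satisfies WIFSTOPPED and
   WSTOPSIG.  Worked example (illustrative): SIGSTOP is 19 on
   Linux, giving (19 << 8) | 0x7f == 0x137f.  */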
2280 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2281 enqueue_one_deferred_signal (lwp, &wstat);
2282 }
2283 }
2284 }
2285
2286 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2287
2288 stabilizing_threads = 0;
2289
2290 current_inferior = save_inferior;
2291
2292 if (debug_threads)
2293 {
2294 lwp_stuck
2295 = (struct lwp_info *) find_inferior (&all_lwps,
2296 stuck_in_jump_pad_callback, NULL);
2297 if (lwp_stuck != NULL)
2298 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2299 lwpid_of (lwp_stuck));
2300 }
2301 }
2302
2303 /* Wait for the process, and return its status. */
2304
2305 static ptid_t
2306 linux_wait_1 (ptid_t ptid,
2307 struct target_waitstatus *ourstatus, int target_options)
2308 {
2309 int w;
2310 struct lwp_info *event_child;
2311 int options;
2312 int pid;
2313 int step_over_finished;
2314 int bp_explains_trap;
2315 int maybe_internal_trap;
2316 int report_to_gdb;
2317 int trace_event;
2318
2319 /* Translate generic target options into linux options. */
2320 options = __WALL;
2321 if (target_options & TARGET_WNOHANG)
2322 options |= WNOHANG;
2323
2324 retry:
2325 bp_explains_trap = 0;
2326 trace_event = 0;
2327 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2328
2329 /* If we were only supposed to resume one thread, only wait for
2330 that thread - if it's still alive. If it died, however - which
2331 can happen if we're coming from the thread death case below -
2332 then we need to make sure we restart the other threads. We could
2333 pick a thread at random or restart all; restarting all is less
2334 arbitrary. */
2335 if (!non_stop
2336 && !ptid_equal (cont_thread, null_ptid)
2337 && !ptid_equal (cont_thread, minus_one_ptid))
2338 {
2339 struct thread_info *thread;
2340
2341 thread = (struct thread_info *) find_inferior_id (&all_threads,
2342 cont_thread);
2343
2344 /* No stepping, no signal - unless one is pending already, of course. */
2345 if (thread == NULL)
2346 {
2347 struct thread_resume resume_info;
2348 resume_info.thread = minus_one_ptid;
2349 resume_info.kind = resume_continue;
2350 resume_info.sig = 0;
2351 linux_resume (&resume_info, 1);
2352 }
2353 else
2354 ptid = cont_thread;
2355 }
2356
2357 if (ptid_equal (step_over_bkpt, null_ptid))
2358 pid = linux_wait_for_event (ptid, &w, options);
2359 else
2360 {
2361 if (debug_threads)
2362 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2363 target_pid_to_str (step_over_bkpt));
2364 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2365 }
2366
2367 if (pid == 0) /* only if TARGET_WNOHANG */
2368 return null_ptid;
2369
2370 event_child = get_thread_lwp (current_inferior);
2371
2372 /* If we are waiting for a particular child, and it exited,
2373 linux_wait_for_event will return its exit status. Similarly if
2374 the last child exited. If this is not the last child, however,
2375 do not report it as exited until there is a 'thread exited' response
2376 available in the remote protocol. Instead, just wait for another event.
2377 This should be safe, because if the thread crashed we will already
2378 have reported the termination signal to GDB; that should stop any
2379 in-progress stepping operations, etc.
2380
2381 Report the exit status of the last thread to exit. This matches
2382 LinuxThreads' behavior. */
2383
2384 if (last_thread_of_process_p (current_inferior))
2385 {
2386 if (WIFEXITED (w) || WIFSIGNALED (w))
2387 {
2388 if (WIFEXITED (w))
2389 {
2390 ourstatus->kind = TARGET_WAITKIND_EXITED;
2391 ourstatus->value.integer = WEXITSTATUS (w);
2392
2393 if (debug_threads)
2394 fprintf (stderr,
2395 "\nChild exited with retcode = %x \n",
2396 WEXITSTATUS (w));
2397 }
2398 else
2399 {
2400 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2401 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2402
2403 if (debug_threads)
2404 fprintf (stderr,
2405 "\nChild terminated with signal = %x \n",
2406 WTERMSIG (w));
2407
2408 }
2409
2410 return ptid_of (event_child);
2411 }
2412 }
2413 else
2414 {
2415 if (!WIFSTOPPED (w))
2416 goto retry;
2417 }
2418
2419 /* If this event was not handled before, and is not a SIGTRAP, we
2420 report it. SIGILL and SIGSEGV are also treated as traps in case
2421 a breakpoint is inserted at the current PC. If this target does
2422 not support internal breakpoints at all, we also report the
2423 SIGTRAP without further processing; it's of no concern to us. */
2424 maybe_internal_trap
2425 = (supports_breakpoints ()
2426 && (WSTOPSIG (w) == SIGTRAP
2427 || ((WSTOPSIG (w) == SIGILL
2428 || WSTOPSIG (w) == SIGSEGV)
2429 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2430
2431 if (maybe_internal_trap)
2432 {
2433 /* Handle anything that requires bookkeeping before deciding to
2434 report the event or continue waiting. */
2435
2436 /* First check if we can explain the SIGTRAP with an internal
2437 breakpoint, or if we should possibly report the event to GDB.
2438 Do this before anything that may remove or insert a
2439 breakpoint. */
2440 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2441
2442 /* We have a SIGTRAP, possibly a step-over dance has just
2443 finished. If so, tweak the state machine accordingly,
2444 reinsert breakpoints and delete any reinsert (software
2445 single-step) breakpoints. */
2446 step_over_finished = finish_step_over (event_child);
2447
2448 /* Now invoke the callbacks of any internal breakpoints there. */
2449 check_breakpoints (event_child->stop_pc);
2450
2451 /* Handle tracepoint data collecting. This may overflow the
2452 trace buffer, and cause a tracing stop, removing
2453 breakpoints. */
2454 trace_event = handle_tracepoints (event_child);
2455
2456 if (bp_explains_trap)
2457 {
2458 /* If we stepped or ran into an internal breakpoint, we've
2459 already handled it. So next time we resume (from this
2460 PC), we should step over it. */
2461 if (debug_threads)
2462 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2463
2464 if (breakpoint_here (event_child->stop_pc))
2465 event_child->need_step_over = 1;
2466 }
2467 }
2468 else
2469 {
2470 /* We have some other signal, possibly a step-over dance was in
2471 progress, and it should be cancelled too. */
2472 step_over_finished = finish_step_over (event_child);
2473 }
2474
2475 /* We have all the data we need. Either report the event to GDB, or
2476 resume threads and keep waiting for more. */
2477
2478 /* If we're collecting a fast tracepoint, finish the collection and
2479 move out of the jump pad before delivering a signal. See
2480 linux_stabilize_threads. */
2481
2482 if (WIFSTOPPED (w)
2483 && WSTOPSIG (w) != SIGTRAP
2484 && supports_fast_tracepoints ()
2485 && agent_loaded_p ())
2486 {
2487 if (debug_threads)
2488 fprintf (stderr,
2489 "Got signal %d for LWP %ld. Check if we need "
2490 "to defer or adjust it.\n",
2491 WSTOPSIG (w), lwpid_of (event_child));
2492
2493 /* Allow debugging the jump pad itself. */
2494 if (current_inferior->last_resume_kind != resume_step
2495 && maybe_move_out_of_jump_pad (event_child, &w))
2496 {
2497 enqueue_one_deferred_signal (event_child, &w);
2498
2499 if (debug_threads)
2500 fprintf (stderr,
2501 "Signal %d for LWP %ld deferred (in jump pad)\n",
2502 WSTOPSIG (w), lwpid_of (event_child));
2503
2504 linux_resume_one_lwp (event_child, 0, 0, NULL);
2505 goto retry;
2506 }
2507 }
2508
2509 if (event_child->collecting_fast_tracepoint)
2510 {
2511 if (debug_threads)
2512 fprintf (stderr, "\
2513 LWP %ld was trying to move out of the jump pad (%d). \
2514 Check if we're already there.\n",
2515 lwpid_of (event_child),
2516 event_child->collecting_fast_tracepoint);
2517
2518 trace_event = 1;
2519
2520 event_child->collecting_fast_tracepoint
2521 = linux_fast_tracepoint_collecting (event_child, NULL);
2522
2523 if (event_child->collecting_fast_tracepoint != 1)
2524 {
2525 /* No longer need this breakpoint. */
2526 if (event_child->exit_jump_pad_bkpt != NULL)
2527 {
2528 if (debug_threads)
2529 fprintf (stderr,
2530 "No longer need exit-jump-pad bkpt; removing it."
2531 "stopping all threads momentarily.\n");
2532
2533 /* Other running threads could hit this breakpoint.
2534 We don't handle moribund locations like GDB does;
2535 instead we always pause all threads when removing
2536 breakpoints, so that any step-over or
2537 decr_pc_after_break adjustment is always taken
2538 care of while the breakpoint is still
2539 inserted. */
2540 stop_all_lwps (1, event_child);
2541 cancel_breakpoints ();
2542
2543 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2544 event_child->exit_jump_pad_bkpt = NULL;
2545
2546 unstop_all_lwps (1, event_child);
2547
2548 gdb_assert (event_child->suspended >= 0);
2549 }
2550 }
2551
2552 if (event_child->collecting_fast_tracepoint == 0)
2553 {
2554 if (debug_threads)
2555 fprintf (stderr,
2556 "fast tracepoint finished "
2557 "collecting successfully.\n");
2558
2559 /* We may have a deferred signal to report. */
2560 if (dequeue_one_deferred_signal (event_child, &w))
2561 {
2562 if (debug_threads)
2563 fprintf (stderr, "dequeued one signal.\n");
2564 }
2565 else
2566 {
2567 if (debug_threads)
2568 fprintf (stderr, "no deferred signals.\n");
2569
2570 if (stabilizing_threads)
2571 {
2572 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2573 ourstatus->value.sig = GDB_SIGNAL_0;
2574 return ptid_of (event_child);
2575 }
2576 }
2577 }
2578 }
2579
2580 /* Check whether GDB would be interested in this event. */
2581
2582 /* If GDB is not interested in this signal, don't stop other
2583 threads, and don't report it to GDB. Just resume the inferior
2584 right away. We do this for threading-related signals as well as
2585 any that GDB specifically requested we ignore. But never ignore
2586 SIGSTOP if we sent it ourselves, and do not ignore signals when
2587 stepping - they may require special handling to skip the signal
2588 handler. */
2589 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2590 thread library? */
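/* Example (illustrative): if GDB asked for SIGUSR1 to be passed via
   the QPassSignals packet, pass_signals[gdb_signal_from_host
   (SIGUSR1)] is nonzero, so a SIGUSR1 stop is swallowed here and
   re-delivered to the inferior by linux_resume_one_lwp instead of
   being reported.  */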
2591 if (WIFSTOPPED (w)
2592 && current_inferior->last_resume_kind != resume_step
2593 && (
2594 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2595 (current_process ()->private->thread_db != NULL
2596 && (WSTOPSIG (w) == __SIGRTMIN
2597 || WSTOPSIG (w) == __SIGRTMIN + 1))
2598 ||
2599 #endif
2600 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2601 && !(WSTOPSIG (w) == SIGSTOP
2602 && current_inferior->last_resume_kind == resume_stop))))
2603 {
2604 siginfo_t info, *info_p;
2605
2606 if (debug_threads)
2607 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2608 WSTOPSIG (w), lwpid_of (event_child));
2609
2610 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2611 (PTRACE_ARG3_TYPE) 0, &info) == 0)
2612 info_p = &info;
2613 else
2614 info_p = NULL;
2615 linux_resume_one_lwp (event_child, event_child->stepping,
2616 WSTOPSIG (w), info_p);
2617 goto retry;
2618 }
2619
2620 /* If GDB wanted this thread to single step, we always want to
2621 report the SIGTRAP, and let GDB handle it. Watchpoints should
2622 always be reported. So should signals we can't explain. A
2623 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2624 may not support Z0 breakpoints. If we do, we'll be able to handle
2625 GDB breakpoints on top of internal breakpoints, by handling the
2626 internal breakpoint and still reporting the event to GDB. If we
2627 don't, we're out of luck; GDB won't see the breakpoint hit. */
2628 report_to_gdb = (!maybe_internal_trap
2629 || current_inferior->last_resume_kind == resume_step
2630 || event_child->stopped_by_watchpoint
2631 || (!step_over_finished
2632 && !bp_explains_trap && !trace_event)
2633 || (gdb_breakpoint_here (event_child->stop_pc)
2634 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2635 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2636
2637 run_breakpoint_commands (event_child->stop_pc);
2638
2639 /* We found no reason GDB would want us to stop. We either hit one
2640 of our own breakpoints, or finished an internal step GDB
2641 shouldn't know about. */
2642 if (!report_to_gdb)
2643 {
2644 if (debug_threads)
2645 {
2646 if (bp_explains_trap)
2647 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2648 if (step_over_finished)
2649 fprintf (stderr, "Step-over finished.\n");
2650 if (trace_event)
2651 fprintf (stderr, "Tracepoint event.\n");
2652 }
2653
2654 /* We're not reporting this breakpoint to GDB, so apply the
2655 decr_pc_after_break adjustment to the inferior's regcache
2656 ourselves. */
2657
2658 if (the_low_target.set_pc != NULL)
2659 {
2660 struct regcache *regcache
2661 = get_thread_regcache (get_lwp_thread (event_child), 1);
2662 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2663 }
2664
2665 /* We may have finished stepping over a breakpoint. If so,
2666 we've stopped and suspended all LWPs momentarily except the
2667 stepping one. This is where we resume them all again. We're
2668 going to keep waiting, so use proceed, which handles stepping
2669 over the next breakpoint. */
2670 if (debug_threads)
2671 fprintf (stderr, "proceeding all threads.\n");
2672
2673 if (step_over_finished)
2674 unsuspend_all_lwps (event_child);
2675
2676 proceed_all_lwps ();
2677 goto retry;
2678 }
2679
2680 if (debug_threads)
2681 {
2682 if (current_inferior->last_resume_kind == resume_step)
2683 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2684 if (event_child->stopped_by_watchpoint)
2685 fprintf (stderr, "Stopped by watchpoint.\n");
2686 if (gdb_breakpoint_here (event_child->stop_pc))
2687 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2688 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2690 }
2691
2692 /* Alright, we're going to report a stop. */
2693
2694 if (!non_stop && !stabilizing_threads)
2695 {
2696 /* In all-stop, stop all threads. */
2697 stop_all_lwps (0, NULL);
2698
2699 /* If we're not waiting for a specific LWP, choose an event LWP
2700 from among those that have had events. Giving equal priority
2701 to all LWPs that have had events helps prevent
2702 starvation. */
2703 if (ptid_equal (ptid, minus_one_ptid))
2704 {
2705 event_child->status_pending_p = 1;
2706 event_child->status_pending = w;
2707
2708 select_event_lwp (&event_child);
2709
2710 event_child->status_pending_p = 0;
2711 w = event_child->status_pending;
2712 }
2713
2714 /* Now that we've selected our final event LWP, cancel any
2715 breakpoints in other LWPs that have hit a GDB breakpoint.
2716 See the comment in cancel_breakpoints_callback to find out
2717 why. */
2718 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2719
2720 /* If we were doing a step-over, all other threads but the stepping one
2721 had been paused in start_step_over, with their suspend counts
2722 incremented. We don't want to do a full unstop/unpause, because we're
2723 in all-stop mode (so we want threads stopped), but we still need to
2724 unsuspend the other threads, to decrement their `suspended' count
2725 back. */
2726 if (step_over_finished)
2727 unsuspend_all_lwps (event_child);
2728
2729 /* Stabilize threads (move out of jump pads). */
2730 stabilize_threads ();
2731 }
2732 else
2733 {
2734 /* If we just finished a step-over, then all threads had been
2735 momentarily paused. In all-stop, that's fine, we want
2736 threads stopped by now anyway. In non-stop, we need to
2737 re-resume threads that GDB wanted to be running. */
2738 if (step_over_finished)
2739 unstop_all_lwps (1, event_child);
2740 }
2741
2742 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2743
2744 if (current_inferior->last_resume_kind == resume_stop
2745 && WSTOPSIG (w) == SIGSTOP)
2746 {
2747 /* A thread that has been requested to stop by GDB with vCont;t
2748 stopped cleanly, so report it as SIG0. The use of
2749 SIGSTOP is an implementation detail. */
2750 ourstatus->value.sig = GDB_SIGNAL_0;
2751 }
2752 else if (current_inferior->last_resume_kind == resume_stop
2753 && WSTOPSIG (w) != SIGSTOP)
2754 {
2755 /* A thread that has been requested to stop by GDB with vCont;t,
2756 but it stopped for some other reason. */
2757 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2758 }
2759 else
2760 {
2761 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2762 }
2763
2764 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2765
2766 if (debug_threads)
2767 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2768 target_pid_to_str (ptid_of (event_child)),
2769 ourstatus->kind,
2770 ourstatus->value.sig);
2771
2772 return ptid_of (event_child);
2773 }
2774
2775 /* Get rid of any pending event in the pipe. */
2776 static void
2777 async_file_flush (void)
2778 {
2779 int ret;
2780 char buf;
2781
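/* Descriptive note (assumption: the event pipe is created
   non-blocking when async mode is enabled elsewhere in this file):
   the loop below drains any queued bytes and terminates once read
   fails with EAGAIN instead of blocking.  */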
2782 do
2783 ret = read (linux_event_pipe[0], &buf, 1);
2784 while (ret >= 0 || (ret == -1 && errno == EINTR));
2785 }
2786
2787 /* Put something in the pipe, so the event loop wakes up. */
2788 static void
2789 async_file_mark (void)
2790 {
2791 int ret;
2792
2793 async_file_flush ();
2794
2795 do
2796 ret = write (linux_event_pipe[1], "+", 1);
2797 while (ret == 0 || (ret == -1 && errno == EINTR));
2798
2799 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2800 be awakened anyway. */
2801 }
2802
2803 static ptid_t
2804 linux_wait (ptid_t ptid,
2805 struct target_waitstatus *ourstatus, int target_options)
2806 {
2807 ptid_t event_ptid;
2808
2809 if (debug_threads)
2810 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2811
2812 /* Flush the async file first. */
2813 if (target_is_async_p ())
2814 async_file_flush ();
2815
2816 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2817
2818 /* If at least one stop was reported, there may be more. A single
2819 SIGCHLD can signal more than one child stop. */
2820 if (target_is_async_p ()
2821 && (target_options & TARGET_WNOHANG) != 0
2822 && !ptid_equal (event_ptid, null_ptid))
2823 async_file_mark ();
2824
2825 return event_ptid;
2826 }
2827
2828 /* Send a signal to an LWP. */
2829
2830 static int
2831 kill_lwp (unsigned long lwpid, int signo)
2832 {
2833 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2834 is unavailable (ENOSYS), we are not using nptl threads and should fall back to kill. */
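/* Background note: tkill(2) directs the signal at one specific
   kernel task (LWP), which is what we need with NPTL, where all
   LWPs share a single process id; under older LinuxThreads each
   LWP has its own pid, so plain kill reaches it just as well.  */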
2835
2836 #ifdef __NR_tkill
2837 {
2838 static int tkill_failed;
2839
2840 if (!tkill_failed)
2841 {
2842 int ret;
2843
2844 errno = 0;
2845 ret = syscall (__NR_tkill, lwpid, signo);
2846 if (errno != ENOSYS)
2847 return ret;
2848 tkill_failed = 1;
2849 }
2850 }
2851 #endif
2852
2853 return kill (lwpid, signo);
2854 }
2855
2856 void
2857 linux_stop_lwp (struct lwp_info *lwp)
2858 {
2859 send_sigstop (lwp);
2860 }
2861
2862 static void
2863 send_sigstop (struct lwp_info *lwp)
2864 {
2865 int pid;
2866
2867 pid = lwpid_of (lwp);
2868
2869 /* If we already have a pending stop signal for this process, don't
2870 send another. */
2871 if (lwp->stop_expected)
2872 {
2873 if (debug_threads)
2874 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2875
2876 return;
2877 }
2878
2879 if (debug_threads)
2880 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2881
2882 lwp->stop_expected = 1;
2883 kill_lwp (pid, SIGSTOP);
2884 }
2885
2886 static int
2887 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2888 {
2889 struct lwp_info *lwp = (struct lwp_info *) entry;
2890
2891 /* Ignore EXCEPT. */
2892 if (lwp == except)
2893 return 0;
2894
2895 if (lwp->stopped)
2896 return 0;
2897
2898 send_sigstop (lwp);
2899 return 0;
2900 }
2901
2902 /* Increment the suspend count of an LWP, and stop it, if not stopped
2903 yet. */
2904 static int
2905 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2906 void *except)
2907 {
2908 struct lwp_info *lwp = (struct lwp_info *) entry;
2909
2910 /* Ignore EXCEPT. */
2911 if (lwp == except)
2912 return 0;
2913
2914 lwp->suspended++;
2915
2916 return send_sigstop_callback (entry, except);
2917 }
2918
2919 static void
2920 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2921 {
2922 /* It's dead, really. */
2923 lwp->dead = 1;
2924
2925 /* Store the exit status for later. */
2926 lwp->status_pending_p = 1;
2927 lwp->status_pending = wstat;
2928
2929 /* Prevent trying to stop it. */
2930 lwp->stopped = 1;
2931
2932 /* No further stops are expected from a dead lwp. */
2933 lwp->stop_expected = 0;
2934 }
2935
2936 static void
2937 wait_for_sigstop (struct inferior_list_entry *entry)
2938 {
2939 struct lwp_info *lwp = (struct lwp_info *) entry;
2940 struct thread_info *saved_inferior;
2941 int wstat;
2942 ptid_t saved_tid;
2943 ptid_t ptid;
2944 int pid;
2945
2946 if (lwp->stopped)
2947 {
2948 if (debug_threads)
2949 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2950 lwpid_of (lwp));
2951 return;
2952 }
2953
2954 saved_inferior = current_inferior;
2955 if (saved_inferior != NULL)
2956 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2957 else
2958 saved_tid = null_ptid; /* avoid bogus unused warning */
2959
2960 ptid = lwp->head.id;
2961
2962 if (debug_threads)
2963 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2964
2965 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2966
2967 /* If we stopped with a non-SIGSTOP signal, save it for later
2968 and record the pending SIGSTOP. If the process exited, just
2969 return. */
2970 if (WIFSTOPPED (wstat))
2971 {
2972 if (debug_threads)
2973 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2974 lwpid_of (lwp), WSTOPSIG (wstat));
2975
2976 if (WSTOPSIG (wstat) != SIGSTOP)
2977 {
2978 if (debug_threads)
2979 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2980 lwpid_of (lwp), wstat);
2981
2982 lwp->status_pending_p = 1;
2983 lwp->status_pending = wstat;
2984 }
2985 }
2986 else
2987 {
2988 if (debug_threads)
2989 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2990
2991 lwp = find_lwp_pid (pid_to_ptid (pid));
2992 if (lwp)
2993 {
2994 /* Leave this status pending for the next time we're able to
2995 report it. In the meantime, we'll report this lwp as
2996 dead to GDB, so GDB doesn't try to read registers and
2997 memory from it. This can only happen if this was the
2998 last thread of the process; otherwise, PID is removed
2999 from the thread tables before linux_wait_for_event
3000 returns. */
3001 mark_lwp_dead (lwp, wstat);
3002 }
3003 }
3004
3005 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3006 current_inferior = saved_inferior;
3007 else
3008 {
3009 if (debug_threads)
3010 fprintf (stderr, "Previously current thread died.\n");
3011
3012 if (non_stop)
3013 {
3014 /* We can't change the current inferior behind GDB's back;
3015 otherwise, a subsequent command may apply to the wrong
3016 process. */
3017 current_inferior = NULL;
3018 }
3019 else
3020 {
3021 /* Set a valid thread as current. */
3022 set_desired_inferior (0);
3023 }
3024 }
3025 }
3026
3027 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3028 move it out, because we need to report the stop event to GDB. For
3029 example, if the user puts a breakpoint in the jump pad, it's
3030 because she wants to debug it. */
3031
3032 static int
3033 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3034 {
3035 struct lwp_info *lwp = (struct lwp_info *) entry;
3036 struct thread_info *thread = get_lwp_thread (lwp);
3037
3038 gdb_assert (lwp->suspended == 0);
3039 gdb_assert (lwp->stopped);
3040
3041 /* Allow debugging the jump pad, gdb_collect, etc. */
3042 return (supports_fast_tracepoints ()
3043 && agent_loaded_p ()
3044 && (gdb_breakpoint_here (lwp->stop_pc)
3045 || lwp->stopped_by_watchpoint
3046 || thread->last_resume_kind == resume_step)
3047 && linux_fast_tracepoint_collecting (lwp, NULL));
3048 }
3049
3050 static void
3051 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3052 {
3053 struct lwp_info *lwp = (struct lwp_info *) entry;
3054 struct thread_info *thread = get_lwp_thread (lwp);
3055 int *wstat;
3056
3057 gdb_assert (lwp->suspended == 0);
3058 gdb_assert (lwp->stopped);
3059
3060 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3061
3062 /* Allow debugging the jump pad, gdb_collect, etc. */
3063 if (!gdb_breakpoint_here (lwp->stop_pc)
3064 && !lwp->stopped_by_watchpoint
3065 && thread->last_resume_kind != resume_step
3066 && maybe_move_out_of_jump_pad (lwp, wstat))
3067 {
3068 if (debug_threads)
3069 fprintf (stderr,
3070 "LWP %ld needs stabilizing (in jump pad)\n",
3071 lwpid_of (lwp));
3072
3073 if (wstat)
3074 {
3075 lwp->status_pending_p = 0;
3076 enqueue_one_deferred_signal (lwp, wstat);
3077
3078 if (debug_threads)
3079 fprintf (stderr,
3080 "Signal %d for LWP %ld deferred "
3081 "(in jump pad)\n",
3082 WSTOPSIG (*wstat), lwpid_of (lwp));
3083 }
3084
3085 linux_resume_one_lwp (lwp, 0, 0, NULL);
3086 }
3087 else
3088 lwp->suspended++;
3089 }
3090
3091 static int
3092 lwp_running (struct inferior_list_entry *entry, void *data)
3093 {
3094 struct lwp_info *lwp = (struct lwp_info *) entry;
3095
3096 if (lwp->dead)
3097 return 0;
3098 if (lwp->stopped)
3099 return 0;
3100 return 1;
3101 }
3102
3103 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3104 If SUSPEND, then also increase the suspend count of every LWP,
3105 except EXCEPT. */
3106
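/* Usage note (see the exit-jump-pad-bkpt handling in linux_wait_1
   above): a suspending stop, stop_all_lwps (1, lwp), is paired with
   unstop_all_lwps (1, lwp), while the plain stop_all_lwps (0, NULL)
   in the all-stop path leaves threads stopped until GDB resumes
   them.  */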
3107 static void
3108 stop_all_lwps (int suspend, struct lwp_info *except)
3109 {
3110 /* Should not be called recursively. */
3111 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3112
3113 stopping_threads = (suspend
3114 ? STOPPING_AND_SUSPENDING_THREADS
3115 : STOPPING_THREADS);
3116
3117 if (suspend)
3118 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3119 else
3120 find_inferior (&all_lwps, send_sigstop_callback, except);
3121 for_each_inferior (&all_lwps, wait_for_sigstop);
3122 stopping_threads = NOT_STOPPING_THREADS;
3123 }
3124
3125 /* Resume execution of the inferior process.
3126 If STEP is nonzero, single-step it.
3127 If SIGNAL is nonzero, give it that signal. */
3128
3129 static void
3130 linux_resume_one_lwp (struct lwp_info *lwp,
3131 int step, int signal, siginfo_t *info)
3132 {
3133 struct thread_info *saved_inferior;
3134 int fast_tp_collecting;
3135
3136 if (lwp->stopped == 0)
3137 return;
3138
3139 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3140
3141 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3142
3143 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3144 user used the "jump" command, or "set $pc = foo"). */
3145 if (lwp->stop_pc != get_pc (lwp))
3146 {
3147 /* Collecting 'while-stepping' actions doesn't make sense
3148 anymore. */
3149 release_while_stepping_state_list (get_lwp_thread (lwp));
3150 }
3151
3152 /* If we have pending signals or status, and a new signal, enqueue the
3153 signal. Also enqueue the signal if we are waiting to reinsert a
3154 breakpoint; it will be picked up again below. */
3155 if (signal != 0
3156 && (lwp->status_pending_p
3157 || lwp->pending_signals != NULL
3158 || lwp->bp_reinsert != 0
3159 || fast_tp_collecting))
3160 {
3161 struct pending_signals *p_sig;
3162 p_sig = xmalloc (sizeof (*p_sig));
3163 p_sig->prev = lwp->pending_signals;
3164 p_sig->signal = signal;
3165 if (info == NULL)
3166 memset (&p_sig->info, 0, sizeof (siginfo_t));
3167 else
3168 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3169 lwp->pending_signals = p_sig;
3170 }
3171
3172 if (lwp->status_pending_p)
3173 {
3174 if (debug_threads)
3175 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3176 " has pending status\n",
3177 lwpid_of (lwp), step ? "step" : "continue", signal,
3178 lwp->stop_expected ? "expected" : "not expected");
3179 return;
3180 }
3181
3182 saved_inferior = current_inferior;
3183 current_inferior = get_lwp_thread (lwp);
3184
3185 if (debug_threads)
3186 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3187 lwpid_of (lwp), step ? "step" : "continue", signal,
3188 lwp->stop_expected ? "expected" : "not expected");
3189
3190 /* This bit needs some thinking about. If we get a signal that
3191 we must report while a single-step reinsert is still pending,
3192 we often end up resuming the thread. It might be better to
3193 (ew) allow a stack of pending events; then we could be sure that
3194 the reinsert happened right away and not lose any signals.
3195
3196 Making this stack would also shrink the window in which breakpoints are
3197 uninserted (see comment in linux_wait_for_lwp) but not enough for
3198 complete correctness, so it won't solve that problem. It may be
3199 worthwhile just to solve this one, however. */
3200 if (lwp->bp_reinsert != 0)
3201 {
3202 if (debug_threads)
3203 fprintf (stderr, " pending reinsert at 0x%s\n",
3204 paddress (lwp->bp_reinsert));
3205
3206 if (can_hardware_single_step ())
3207 {
3208 if (fast_tp_collecting == 0)
3209 {
3210 if (step == 0)
3211 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3212 if (lwp->suspended)
3213 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3214 lwp->suspended);
3215 }
3216
3217 step = 1;
3218 }
3219
3220 /* Postpone any pending signal. It was enqueued above. */
3221 signal = 0;
3222 }
3223
3224 if (fast_tp_collecting == 1)
3225 {
3226 if (debug_threads)
3227 fprintf (stderr, "\
3228 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3229 lwpid_of (lwp));
3230
3231 /* Postpone any pending signal. It was enqueued above. */
3232 signal = 0;
3233 }
3234 else if (fast_tp_collecting == 2)
3235 {
3236 if (debug_threads)
3237 fprintf (stderr, "\
3238 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3239 lwpid_of (lwp));
3240
3241 if (can_hardware_single_step ())
3242 step = 1;
3243 else
3244 fatal ("moving out of jump pad single-stepping"
3245 " not implemented on this target");
3246
3247 /* Postpone any pending signal. It was enqueued above. */
3248 signal = 0;
3249 }
3250
3251 /* If we have while-stepping actions in this thread, set it stepping.
3252 If we have a signal to deliver, it may or may not be set to
3253 SIG_IGN, we don't know. Assume so, and allow collecting
3254 while-stepping into a signal handler. A possible smart thing to
3255 do would be to set an internal breakpoint at the signal return
3256 address, continue, and carry on catching this while-stepping
3257 action only when that breakpoint is hit. A future
3258 enhancement. */
3259 if (get_lwp_thread (lwp)->while_stepping != NULL
3260 && can_hardware_single_step ())
3261 {
3262 if (debug_threads)
3263 fprintf (stderr,
3264 "lwp %ld has a while-stepping action -> forcing step.\n",
3265 lwpid_of (lwp));
3266 step = 1;
3267 }
3268
3269 if (debug_threads && the_low_target.get_pc != NULL)
3270 {
3271 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3272 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3273 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3274 }
3275
3276 /* If we have pending signals, consume one unless we are trying to
3277 reinsert a breakpoint or we're trying to finish a fast tracepoint
3278 collect. */
3279 if (lwp->pending_signals != NULL
3280 && lwp->bp_reinsert == 0
3281 && fast_tp_collecting == 0)
3282 {
3283 struct pending_signals **p_sig;
3284
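/* The pending_signals list is pushed newest-first (see the enqueue
   code earlier in this function), so walking the ->prev links below
   reaches the oldest entry first, preserving delivery order.  */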
3285 p_sig = &lwp->pending_signals;
3286 while ((*p_sig)->prev != NULL)
3287 p_sig = &(*p_sig)->prev;
3288
3289 signal = (*p_sig)->signal;
3290 if ((*p_sig)->info.si_signo != 0)
3291 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3292 &(*p_sig)->info);
3293
3294 free (*p_sig);
3295 *p_sig = NULL;
3296 }
3297
3298 if (the_low_target.prepare_to_resume != NULL)
3299 the_low_target.prepare_to_resume (lwp);
3300
3301 regcache_invalidate_one ((struct inferior_list_entry *)
3302 get_lwp_thread (lwp));
3303 errno = 0;
3304 lwp->stopped = 0;
3305 lwp->stopped_by_watchpoint = 0;
3306 lwp->stepping = step;
3307 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3308 (PTRACE_ARG3_TYPE) 0,
3309 /* Coerce to a uintptr_t first to avoid potential gcc warning
3310 of coercing an 8 byte integer to a 4 byte pointer. */
3311 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3312
3313 current_inferior = saved_inferior;
3314 if (errno)
3315 {
3316 /* ESRCH from ptrace either means that the thread was already
3317 running (an error) or that it is gone (a race condition). If
3318 it's gone, we will get a notification the next time we wait,
3319 so we can ignore the error. We could differentiate these
3320 two, but it's tricky without waiting; the thread still exists
3321 as a zombie, so sending it signal 0 would succeed. So just
3322 ignore ESRCH. */
3323 if (errno == ESRCH)
3324 return;
3325
3326 perror_with_name ("ptrace");
3327 }
3328 }
3329
3330 struct thread_resume_array
3331 {
3332 struct thread_resume *resume;
3333 size_t n;
3334 };
3335
3336 /* This function is called once per thread. We look up the thread
3337 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3338 resume request.
3339
3340 This algorithm is O(threads * resume elements), but the number of
3341 resume elements is small (and will remain small at least until GDB
3342 supports thread suspension). */
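/* For example (hypothetical request): a GDB "vCont;s:p5.5;c" arrives
   here as a two-element array: resume[0] names thread p5.5 with kind
   resume_step, and resume[1] names minus_one_ptid (any thread) with
   kind resume_continue.  Each thread is marked with the first entry
   that matches it, since the loop below returns on the first
   match.  */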
3343 static int
3344 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3345 {
3346 struct lwp_info *lwp;
3347 struct thread_info *thread;
3348 int ndx;
3349 struct thread_resume_array *r;
3350
3351 thread = (struct thread_info *) entry;
3352 lwp = get_thread_lwp (thread);
3353 r = arg;
3354
3355 for (ndx = 0; ndx < r->n; ndx++)
3356 {
3357 ptid_t ptid = r->resume[ndx].thread;
3358 if (ptid_equal (ptid, minus_one_ptid)
3359 || ptid_equal (ptid, entry->id)
3360 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3361 of PID'. */
3362 || (ptid_get_pid (ptid) == pid_of (lwp)
3363 && (ptid_is_pid (ptid)
3364 || ptid_get_lwp (ptid) == -1)))
3365 {
3366 if (r->resume[ndx].kind == resume_stop
3367 && thread->last_resume_kind == resume_stop)
3368 {
3369 if (debug_threads)
3370 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3371 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3372 ? "stopped"
3373 : "stopping",
3374 lwpid_of (lwp));
3375
3376 continue;
3377 }
3378
3379 lwp->resume = &r->resume[ndx];
3380 thread->last_resume_kind = lwp->resume->kind;
3381
3382 /* If we had a deferred signal to report, dequeue one now.
3383 This can happen if LWP gets more than one signal while
3384 trying to get out of a jump pad. */
3385 if (lwp->stopped
3386 && !lwp->status_pending_p
3387 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3388 {
3389 lwp->status_pending_p = 1;
3390
3391 if (debug_threads)
3392 fprintf (stderr,
3393 "Dequeueing deferred signal %d for LWP %ld, "
3394 "leaving status pending.\n",
3395 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3396 }
3397
3398 return 0;
3399 }
3400 }
3401
3402 /* No resume action for this thread. */
3403 lwp->resume = NULL;
3404
3405 return 0;
3406 }
3407
3408
3409 /* Set *FLAG_P if this lwp has an interesting status pending. */
3410 static int
3411 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3412 {
3413 struct lwp_info *lwp = (struct lwp_info *) entry;
3414
3415 /* LWPs which will not be resumed are not interesting, because
3416 we might not wait for them next time through linux_wait. */
3417 if (lwp->resume == NULL)
3418 return 0;
3419
3420 if (lwp->status_pending_p)
3421 * (int *) flag_p = 1;
3422
3423 return 0;
3424 }
3425
3426 /* Return 1 if this lwp that GDB wants running is stopped at an
3427 internal breakpoint that we need to step over. It assumes that any
3428 required STOP_PC adjustment has already been propagated to the
3429 inferior's regcache. */
3430
3431 static int
3432 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3433 {
3434 struct lwp_info *lwp = (struct lwp_info *) entry;
3435 struct thread_info *thread;
3436 struct thread_info *saved_inferior;
3437 CORE_ADDR pc;
3438
3439 /* LWPs which will not be resumed are not interesting, because we
3440 might not wait for them next time through linux_wait. */
3441
3442 if (!lwp->stopped)
3443 {
3444 if (debug_threads)
3445 fprintf (stderr,
3446 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3447 lwpid_of (lwp));
3448 return 0;
3449 }
3450
3451 thread = get_lwp_thread (lwp);
3452
3453 if (thread->last_resume_kind == resume_stop)
3454 {
3455 if (debug_threads)
3456 fprintf (stderr,
3457 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3458 lwpid_of (lwp));
3459 return 0;
3460 }
3461
3462 gdb_assert (lwp->suspended >= 0);
3463
3464 if (lwp->suspended)
3465 {
3466 if (debug_threads)
3467 fprintf (stderr,
3468 "Need step over [LWP %ld]? Ignoring, suspended\n",
3469 lwpid_of (lwp));
3470 return 0;
3471 }
3472
3473 if (!lwp->need_step_over)
3474 {
3475 if (debug_threads)
3476 fprintf (stderr,
3477 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3478 }
3479
3480 if (lwp->status_pending_p)
3481 {
3482 if (debug_threads)
3483 fprintf (stderr,
3484 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3485 lwpid_of (lwp));
3486 return 0;
3487 }
3488
3489 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3490 or we have. */
3491 pc = get_pc (lwp);
3492
3493 /* If the PC has changed since we stopped, then don't do anything,
3494 and let the breakpoint/tracepoint be hit. This happens if, for
3495 instance, GDB handled the decr_pc_after_break subtraction itself,
3496 GDB is OOL stepping this thread, or the user has issued a "jump"
3497 command or poked the thread's registers herself. */
3498 if (pc != lwp->stop_pc)
3499 {
3500 if (debug_threads)
3501 fprintf (stderr,
3502 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3503 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3504 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3505
3506 lwp->need_step_over = 0;
3507 return 0;
3508 }
3509
3510 saved_inferior = current_inferior;
3511 current_inferior = thread;
3512
3513 /* We can only step over breakpoints we know about. */
3514 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3515 {
3516 /* Don't step over a breakpoint that GDB expects to hit,
3517 though. If the condition is being evaluated on the target's side
3518 and it evaluates to false, step over this breakpoint as well.
3519 if (gdb_breakpoint_here (pc)
3520 && gdb_condition_true_at_breakpoint (pc)
3521 && gdb_no_commands_at_breakpoint (pc))
3522 {
3523 if (debug_threads)
3524 fprintf (stderr,
3525 "Need step over [LWP %ld]? yes, but found"
3526 " GDB breakpoint at 0x%s; skipping step over\n",
3527 lwpid_of (lwp), paddress (pc));
3528
3529 current_inferior = saved_inferior;
3530 return 0;
3531 }
3532 else
3533 {
3534 if (debug_threads)
3535 fprintf (stderr,
3536 "Need step over [LWP %ld]? yes, "
3537 "found breakpoint at 0x%s\n",
3538 lwpid_of (lwp), paddress (pc));
3539
3540 /* We've found an lwp that needs stepping over --- return 1 so
3541 that find_inferior stops looking. */
3542 current_inferior = saved_inferior;
3543
3544 /* If the step over is cancelled, this is set again. */
3545 lwp->need_step_over = 0;
3546 return 1;
3547 }
3548 }
3549
3550 current_inferior = saved_inferior;
3551
3552 if (debug_threads)
3553 fprintf (stderr,
3554 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3555 lwpid_of (lwp), paddress (pc));
3556
3557 return 0;
3558 }
3559
3560 /* Start a step-over operation on LWP. When LWP is stopped at a
3561 breakpoint, to make progress, we need to move the breakpoint out
3562 of the way. If we let other threads run while we do that, they may
3563 pass by the breakpoint location and miss hitting it. To avoid
3564 that, a step-over momentarily stops all other threads while LWP is
3565 single-stepped with the breakpoint temporarily uninserted from
3566 the inferior. When the single-step finishes, we reinsert the
3567 breakpoint, and let all threads that are supposed to be running,
3568 run again.
3569
3570 On targets that don't support hardware single-step, we don't
3571 currently support full software single-stepping. Instead, we only
3572 support stepping over the thread event breakpoint, by asking the
3573 low target where to place a reinsert breakpoint. Since this
3574 routine assumes the breakpoint being stepped over is a thread event
3575 breakpoint, it usually assumes the return address of the current
3576 function is a good enough place to set the reinsert breakpoint. */
3577
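/* In outline (a reading aid): 1) stop and suspend every other LWP;
   2) record PC in lwp->bp_reinsert and uninsert any breakpoint and
   fast tracepoint jump there; 3) single-step the LWP, or, without
   hardware single-step, set a reinsert breakpoint where the low
   target says; 4) remember the LWP in step_over_bkpt so the next
   wait comes from it, after which finish_step_over reinserts the
   breakpoint.  */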
3578 static int
3579 start_step_over (struct lwp_info *lwp)
3580 {
3581 struct thread_info *saved_inferior;
3582 CORE_ADDR pc;
3583 int step;
3584
3585 if (debug_threads)
3586 fprintf (stderr,
3587 "Starting step-over on LWP %ld. Stopping all threads\n",
3588 lwpid_of (lwp));
3589
3590 stop_all_lwps (1, lwp);
3591 gdb_assert (lwp->suspended == 0);
3592
3593 if (debug_threads)
3594 fprintf (stderr, "Done stopping all threads for step-over.\n");
3595
3596 /* Note, we should always reach here with an already adjusted PC,
3597 either by GDB (if we're resuming due to GDB's request), or by our
3598 caller, if we just finished handling an internal breakpoint GDB
3599 shouldn't care about. */
3600 pc = get_pc (lwp);
3601
3602 saved_inferior = current_inferior;
3603 current_inferior = get_lwp_thread (lwp);
3604
3605 lwp->bp_reinsert = pc;
3606 uninsert_breakpoints_at (pc);
3607 uninsert_fast_tracepoint_jumps_at (pc);
3608
3609 if (can_hardware_single_step ())
3610 {
3611 step = 1;
3612 }
3613 else
3614 {
3615 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3616 set_reinsert_breakpoint (raddr);
3617 step = 0;
3618 }
3619
3620 current_inferior = saved_inferior;
3621
3622 linux_resume_one_lwp (lwp, step, 0, NULL);
3623
3624 /* Require next event from this LWP. */
3625 step_over_bkpt = lwp->head.id;
3626 return 1;
3627 }
3628
3629 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3630 start_step_over, if still there, and delete any reinsert
3631 breakpoints we've set on targets without hardware single-step. */
3632
3633 static int
3634 finish_step_over (struct lwp_info *lwp)
3635 {
3636 if (lwp->bp_reinsert != 0)
3637 {
3638 if (debug_threads)
3639 fprintf (stderr, "Finished step over.\n");
3640
3641 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3642 may be no breakpoint to reinsert there by now. */
3643 reinsert_breakpoints_at (lwp->bp_reinsert);
3644 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3645
3646 lwp->bp_reinsert = 0;
3647
3648 /* Delete any software-single-step reinsert breakpoints. No
3649 longer needed. We don't have to worry about other threads
3650 hitting this trap, and later not being able to explain it,
3651 because we were stepping over a breakpoint, and we hold all
3652 threads but LWP stopped while doing that. */
3653 if (!can_hardware_single_step ())
3654 delete_reinsert_breakpoints ();
3655
3656 step_over_bkpt = null_ptid;
3657 return 1;
3658 }
3659 else
3660 return 0;
3661 }
3662
3663 /* This function is called once per thread. We check the thread's resume
3664 request, which will tell us whether to resume, step, or leave the thread
3665 stopped; and what signal, if any, it should be sent.
3666
3667 For threads which we aren't explicitly told otherwise, we preserve
3668 the stepping flag; this is used for stepping over gdbserver-placed
3669 breakpoints.
3670
3671 If pending_flags was set in any thread, we queue any needed
3672 signals, since we won't actually resume. We already have a pending
3673 event to report, so we don't need to preserve any step requests;
3674 they should be re-issued if necessary. */
3675
3676 static int
3677 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3678 {
3679 struct lwp_info *lwp;
3680 struct thread_info *thread;
3681 int step;
3682 int leave_all_stopped = * (int *) arg;
3683 int leave_pending;
3684
3685 thread = (struct thread_info *) entry;
3686 lwp = get_thread_lwp (thread);
3687
3688 if (lwp->resume == NULL)
3689 return 0;
3690
3691 if (lwp->resume->kind == resume_stop)
3692 {
3693 if (debug_threads)
3694 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3695
3696 if (!lwp->stopped)
3697 {
3698 if (debug_threads)
3699 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3700
3701 /* Stop the thread, and wait for the event asynchronously,
3702 through the event loop. */
3703 send_sigstop (lwp);
3704 }
3705 else
3706 {
3707 if (debug_threads)
3708 fprintf (stderr, "already stopped LWP %ld\n",
3709 lwpid_of (lwp));
3710
3711 /* The LWP may have been stopped in an internal event that
3712 was not meant to be notified back to GDB (e.g., gdbserver
3713 breakpoint), so we should be reporting a stop event in
3714 this case too. */
3715
3716 /* If the thread already has a pending SIGSTOP, this is a
3717 no-op. Otherwise, something later will presumably resume
3718 the thread and this will cause it to cancel any pending
3719 operation, due to last_resume_kind == resume_stop. If
3720 the thread already has a pending status to report, we
3721 will still report it the next time we wait - see
3722 status_pending_p_callback. */
3723
3724 /* If we already have a pending signal to report, then
3725 there's no need to queue a SIGSTOP, as this means we're
3726 midway through moving the LWP out of the jumppad, and we
3727 will report the pending signal as soon as that is
3728 finished. */
3729 if (lwp->pending_signals_to_report == NULL)
3730 send_sigstop (lwp);
3731 }
3732
3733 /* For stop requests, we're done. */
3734 lwp->resume = NULL;
3735 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3736 return 0;
3737 }
3738
3739 /* If this thread which is about to be resumed has a pending status,
3740 then don't resume any threads - we can just report the pending
3741 status. Make sure to queue any signals that would otherwise be
3742 sent. In all-stop mode, we do this decision based on if *any*
3743 thread has a pending status. If there's a thread that needs the
3744 step-over-breakpoint dance, then don't resume any other thread
3745 but that particular one. */
3746 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3747
3748 if (!leave_pending)
3749 {
3750 if (debug_threads)
3751 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3752
3753 step = (lwp->resume->kind == resume_step);
3754 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3755 }
3756 else
3757 {
3758 if (debug_threads)
3759 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3760
3761 /* If we have a new signal, enqueue the signal. */
3762 if (lwp->resume->sig != 0)
3763 {
3764 struct pending_signals *p_sig;
3765 p_sig = xmalloc (sizeof (*p_sig));
3766 p_sig->prev = lwp->pending_signals;
3767 p_sig->signal = lwp->resume->sig;
3768 memset (&p_sig->info, 0, sizeof (siginfo_t));
3769
3770 /* If this is the same signal we were previously stopped by,
3771 make sure to queue its siginfo. We can ignore the return
3772 value of ptrace; if it fails, we'll skip
3773 PTRACE_SETSIGINFO. */
3774 if (WIFSTOPPED (lwp->last_status)
3775 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3776 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3777 &p_sig->info);
3778
3779 lwp->pending_signals = p_sig;
3780 }
3781 }
3782
3783 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3784 lwp->resume = NULL;
3785 return 0;
3786 }
3787
3788 static void
3789 linux_resume (struct thread_resume *resume_info, size_t n)
3790 {
3791 struct thread_resume_array array = { resume_info, n };
3792 struct lwp_info *need_step_over = NULL;
3793 int any_pending;
3794 int leave_all_stopped;
3795
3796 find_inferior (&all_threads, linux_set_resume_request, &array);
3797
3798 /* If there is a thread which would otherwise be resumed, which has
3799 a pending status, then don't resume any threads - we can just
3800 report the pending status. Make sure to queue any signals that
3801 would otherwise be sent. In non-stop mode, we'll apply this
3802 logic to each thread individually. We consume all pending events
3803 before we consider starting a step-over (in all-stop). */
3804 any_pending = 0;
3805 if (!non_stop)
3806 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3807
3808 /* If there is a thread which would otherwise be resumed, which is
3809 stopped at a breakpoint that needs stepping over, then don't
3810 resume any threads - have it step over the breakpoint with all
3811 other threads stopped, then resume all threads again. Make sure
3812 to queue any signals that would otherwise be delivered or
3813 queued. */
3814 if (!any_pending && supports_breakpoints ())
3815 need_step_over
3816 = (struct lwp_info *) find_inferior (&all_lwps,
3817 need_step_over_p, NULL);
3818
3819 leave_all_stopped = (need_step_over != NULL || any_pending);
3820
3821 if (debug_threads)
3822 {
3823 if (need_step_over != NULL)
3824 fprintf (stderr, "Not resuming all, need step over\n");
3825 else if (any_pending)
3826 fprintf (stderr,
3827 "Not resuming, all-stop and found "
3828 "an LWP with pending status\n");
3829 else
3830 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3831 }
3832
3833 /* Even if we're leaving threads stopped, queue all signals we'd
3834 otherwise deliver. */
3835 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3836
3837 if (need_step_over)
3838 start_step_over (need_step_over);
3839 }
3840
3841 /* This function is called once per thread. We check the thread's
3842 last resume request, which will tell us whether to resume, step, or
3843 leave the thread stopped. Any signal the client requested to be
3844 delivered has already been enqueued at this point.
3845
3846 If any thread that GDB wants running is stopped at an internal
3847 breakpoint that needs stepping over, we start a step-over operation
3848 on that particular thread, and leave all others stopped. */
3849
3850 static int
3851 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3852 {
3853 struct lwp_info *lwp = (struct lwp_info *) entry;
3854 struct thread_info *thread;
3855 int step;
3856
3857 if (lwp == except)
3858 return 0;
3859
3860 if (debug_threads)
3861 fprintf (stderr,
3862 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3863
3864 if (!lwp->stopped)
3865 {
3866 if (debug_threads)
3867 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3868 return 0;
3869 }
3870
3871 thread = get_lwp_thread (lwp);
3872
3873 if (thread->last_resume_kind == resume_stop
3874 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3875 {
3876 if (debug_threads)
3877 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3878 lwpid_of (lwp));
3879 return 0;
3880 }
3881
3882 if (lwp->status_pending_p)
3883 {
3884 if (debug_threads)
3885 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3886 lwpid_of (lwp));
3887 return 0;
3888 }
3889
3890 gdb_assert (lwp->suspended >= 0);
3891
3892 if (lwp->suspended)
3893 {
3894 if (debug_threads)
3895 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3896 return 0;
3897 }
3898
3899 if (thread->last_resume_kind == resume_stop
3900 && lwp->pending_signals_to_report == NULL
3901 && lwp->collecting_fast_tracepoint == 0)
3902 {
3903 /* We haven't reported this LWP as stopped yet (otherwise, the
3904 last_status.kind check above would catch it, and we wouldn't
3905 reach here). This LWP may have been momentarily paused by a
3906 stop_all_lwps call while handling, for example, another LWP's
3907 step-over. In that case, the pending expected SIGSTOP signal
3908 that was queued at vCont;t handling time will have already
3909 been consumed by wait_for_sigstop, and so we need to requeue
3910 another one here. Note that if the LWP already has a SIGSTOP
3911 pending, this is a no-op. */
3912
3913 if (debug_threads)
3914 fprintf (stderr,
3915 "Client wants LWP %ld to stop. "
3916 "Making sure it has a SIGSTOP pending\n",
3917 lwpid_of (lwp));
3918
3919 send_sigstop (lwp);
3920 }
3921
3922 step = thread->last_resume_kind == resume_step;
3923 linux_resume_one_lwp (lwp, step, 0, NULL);
3924 return 0;
3925 }
3926
3927 static int
3928 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3929 {
3930 struct lwp_info *lwp = (struct lwp_info *) entry;
3931
3932 if (lwp == except)
3933 return 0;
3934
3935 lwp->suspended--;
3936 gdb_assert (lwp->suspended >= 0);
3937
3938 return proceed_one_lwp (entry, except);
3939 }
3940
3941 /* When we finish a step-over, set threads running again. If there's
3942 another thread that may need a step-over, now's the time to start
3943 it. Eventually, we'll move all threads past their breakpoints. */
3944
3945 static void
3946 proceed_all_lwps (void)
3947 {
3948 struct lwp_info *need_step_over;
3949
3950 /* If there is a thread which would otherwise be resumed, which is
3951 stopped at a breakpoint that needs stepping over, then don't
3952 resume any threads - have it step over the breakpoint with all
3953 other threads stopped, then resume all threads again. */
3954
3955 if (supports_breakpoints ())
3956 {
3957 need_step_over
3958 = (struct lwp_info *) find_inferior (&all_lwps,
3959 need_step_over_p, NULL);
3960
3961 if (need_step_over != NULL)
3962 {
3963 if (debug_threads)
3964 fprintf (stderr, "proceed_all_lwps: found "
3965 "thread %ld needing a step-over\n",
3966 lwpid_of (need_step_over));
3967
3968 start_step_over (need_step_over);
3969 return;
3970 }
3971 }
3972
3973 if (debug_threads)
3974 fprintf (stderr, "Proceeding, no step-over needed\n");
3975
3976 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3977 }
3978
3979 /* Stopped LWPs that the client wanted to be running and that don't
3980 have pending statuses are set to run again, except for EXCEPT if it
3981 is not NULL. This undoes a stop_all_lwps call. */
3982
3983 static void
3984 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3985 {
3986 if (debug_threads)
3987 {
3988 if (except)
3989 fprintf (stderr,
3990 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3991 else
3992 fprintf (stderr,
3993 "unstopping all lwps\n");
3994 }
3995
3996 if (unsuspend)
3997 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3998 else
3999 find_inferior (&all_lwps, proceed_one_lwp, except);
4000 }
4001
4002
4003 #ifdef HAVE_LINUX_REGSETS
4004
4005 #define use_linux_regsets 1
4006
4007 static int
4008 regsets_fetch_inferior_registers (struct regcache *regcache)
4009 {
4010 struct regset_info *regset;
4011 int saw_general_regs = 0;
4012 int pid;
4013 struct iovec iov;
4014
4015 regset = target_regsets;
4016
4017 pid = lwpid_of (get_thread_lwp (current_inferior));
4018 while (regset->size >= 0)
4019 {
4020 void *buf, *data;
4021 int nt_type, res;
4022
4023 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4024 {
4025 regset ++;
4026 continue;
4027 }
4028
4029 buf = xmalloc (regset->size);
4030
4031 nt_type = regset->nt_type;
4032 if (nt_type)
4033 {
4034 iov.iov_base = buf;
4035 iov.iov_len = regset->size;
4036 data = (void *) &iov;
4037 }
4038 else
4039 data = buf;
4040
4041 #ifndef __sparc__
4042 res = ptrace (regset->get_request, pid,
4043 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4044 #else
4045 res = ptrace (regset->get_request, pid, data, nt_type);
4046 #endif
4047 if (res < 0)
4048 {
4049 if (errno == EIO)
4050 {
4051 /* If we get EIO on a regset, do not try it again for
4052 this process. */
4053 disabled_regsets[regset - target_regsets] = 1;
4054 free (buf);
4055 continue;
4056 }
4057 else
4058 {
4059 char s[256];
4060 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4061 pid);
4062 perror (s);
4063 }
4064 }
4065 else if (regset->type == GENERAL_REGS)
4066 saw_general_regs = 1;
4067 regset->store_function (regcache, buf);
4068 regset ++;
4069 free (buf);
4070 }
4071 if (saw_general_regs)
4072 return 0;
4073 else
4074 return 1;
4075 }
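/* Editorial note on the two ptrace call styles above: regsets with a
   nonzero nt_type go through the PTRACE_GETREGSET / PTRACE_SETREGSET
   interface, whose third argument is an NT_* note type and whose
   fourth is a struct iovec describing the buffer; legacy regsets
   (nt_type == 0) pass the buffer directly, as PTRACE_GETREGS and
   friends expect.  The SPARC branch of the #ifdef swaps the argument
   order, matching that kernel's ptrace convention.  */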
4076
4077 static int
4078 regsets_store_inferior_registers (struct regcache *regcache)
4079 {
4080 struct regset_info *regset;
4081 int saw_general_regs = 0;
4082 int pid;
4083 struct iovec iov;
4084
4085 regset = target_regsets;
4086
4087 pid = lwpid_of (get_thread_lwp (current_inferior));
4088 while (regset->size >= 0)
4089 {
4090 void *buf, *data;
4091 int nt_type, res;
4092
4093 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4094 {
4095 regset ++;
4096 continue;
4097 }
4098
4099 buf = xmalloc (regset->size);
4100
4101 /* First fill the buffer with the current register set contents,
4102 in case there are any items in the kernel's regset that are
4103 not in gdbserver's regcache. */
4104
4105 nt_type = regset->nt_type;
4106 if (nt_type)
4107 {
4108 iov.iov_base = buf;
4109 iov.iov_len = regset->size;
4110 data = (void *) &iov;
4111 }
4112 else
4113 data = buf;
4114
4115 #ifndef __sparc__
4116 res = ptrace (regset->get_request, pid,
4117 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4118 #else
4119 res = ptrace (regset->get_request, pid, data, nt_type);
4120 #endif
4121
4122 if (res == 0)
4123 {
4124 /* Then overlay our cached registers on that. */
4125 regset->fill_function (regcache, buf);
4126
4127 /* Only now do we write the register set. */
4128 #ifndef __sparc__
4129 res = ptrace (regset->set_request, pid,
4130 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4131 #else
4132 res = ptrace (regset->set_request, pid, data, nt_type);
4133 #endif
4134 }
4135
4136 if (res < 0)
4137 {
4138 if (errno == EIO)
4139 {
4140 /* If we get EIO on a regset, do not try it again for
4141 this process. */
4142 disabled_regsets[regset - target_regsets] = 1;
4143 free (buf);
4144 continue;
4145 }
4146 else if (errno == ESRCH)
4147 {
4148 /* At this point, ESRCH should mean the process is
4149 already gone, in which case we simply ignore attempts
4150 to change its registers. See also the related
4151 comment in linux_resume_one_lwp. */
4152 free (buf);
4153 return 0;
4154 }
4155 else
4156 {
4157 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4158 }
4159 }
4160 else if (regset->type == GENERAL_REGS)
4161 saw_general_regs = 1;
4162 regset ++;
4163 free (buf);
4164 }
4165 if (saw_general_regs)
4166 return 0;
4167 else
4168 return 1;
4169 }
4170
4171 #else /* !HAVE_LINUX_REGSETS */
4172
4173 #define use_linux_regsets 0
4174 #define regsets_fetch_inferior_registers(regcache) 1
4175 #define regsets_store_inferior_registers(regcache) 1
4176
4177 #endif
4178
4179 /* Return 1 if register REGNO is supported by one of the regset ptrace
4180 calls or 0 if it has to be transferred individually. */
4181
4182 static int
4183 linux_register_in_regsets (int regno)
4184 {
4185 unsigned char mask = 1 << (regno % 8);
4186 size_t index = regno / 8;
4187
4188 return (use_linux_regsets
4189 && (the_low_target.regset_bitmap == NULL
4190 || (the_low_target.regset_bitmap[index] & mask) != 0));
4191 }
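/* Worked example of the bitmap test above (editorial illustration,
   not part of the original file): for REGNO == 13 we get
   index == 13 / 8 == 1 and mask == 1 << (13 % 8) == 0x20, so
   register 13 is transferred via regsets iff bit 5 of byte 1 of
   regset_bitmap is set -- or trivially, if the bitmap is NULL.  */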
4192
4193 #ifdef HAVE_LINUX_USRREGS
4194
4195 int
4196 register_addr (int regnum)
4197 {
4198 int addr;
4199
4200 if (regnum < 0 || regnum >= the_low_target.num_regs)
4201 error ("Invalid register number %d.", regnum);
4202
4203 addr = the_low_target.regmap[regnum];
4204
4205 return addr;
4206 }
4207
4208 /* Fetch one register. */
4209 static void
4210 fetch_register (struct regcache *regcache, int regno)
4211 {
4212 CORE_ADDR regaddr;
4213 int i, size;
4214 char *buf;
4215 int pid;
4216
4217 if (regno >= the_low_target.num_regs)
4218 return;
4219 if ((*the_low_target.cannot_fetch_register) (regno))
4220 return;
4221
4222 regaddr = register_addr (regno);
4223 if (regaddr == -1)
4224 return;
4225
4226 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4227 & -sizeof (PTRACE_XFER_TYPE));
4228 buf = alloca (size);
4229
4230 pid = lwpid_of (get_thread_lwp (current_inferior));
4231 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4232 {
4233 errno = 0;
4234 *(PTRACE_XFER_TYPE *) (buf + i) =
4235 ptrace (PTRACE_PEEKUSER, pid,
4236 /* Coerce to a uintptr_t first to avoid potential gcc warning
4237 of coercing an 8 byte integer to a 4 byte pointer. */
4238 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, (PTRACE_ARG4_TYPE) 0);
4239 regaddr += sizeof (PTRACE_XFER_TYPE);
4240 if (errno != 0)
4241 error ("reading register %d: %s", regno, strerror (errno));
4242 }
4243
4244 if (the_low_target.supply_ptrace_register)
4245 the_low_target.supply_ptrace_register (regcache, regno, buf);
4246 else
4247 supply_register (regcache, regno, buf);
4248 }
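/* Example of the size rounding used above, and again in
   store_register below (editorial illustration): with 8-byte
   PTRACE_XFER_TYPE words, a 10-byte register rounds up to two full
   words, since (10 + 8 - 1) & -8 == 17 & ~7 == 16, so the loop
   issues two PTRACE_PEEKUSER transfers.  */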
4249
4250 /* Store one register. */
4251 static void
4252 store_register (struct regcache *regcache, int regno)
4253 {
4254 CORE_ADDR regaddr;
4255 int i, size;
4256 char *buf;
4257 int pid;
4258
4259 if (regno >= the_low_target.num_regs)
4260 return;
4261 if ((*the_low_target.cannot_store_register) (regno))
4262 return;
4263
4264 regaddr = register_addr (regno);
4265 if (regaddr == -1)
4266 return;
4267
4268 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4269 & -sizeof (PTRACE_XFER_TYPE));
4270 buf = alloca (size);
4271 memset (buf, 0, size);
4272
4273 if (the_low_target.collect_ptrace_register)
4274 the_low_target.collect_ptrace_register (regcache, regno, buf);
4275 else
4276 collect_register (regcache, regno, buf);
4277
4278 pid = lwpid_of (get_thread_lwp (current_inferior));
4279 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4280 {
4281 errno = 0;
4282 ptrace (PTRACE_POKEUSER, pid,
4283 /* Coerce to a uintptr_t first to avoid potential gcc warning
4284 about coercing an 8 byte integer to a 4 byte pointer. */
4285 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4286 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4287 if (errno != 0)
4288 {
4289 /* At this point, ESRCH should mean the process is
4290 already gone, in which case we simply ignore attempts
4291 to change its registers. See also the related
4292 comment in linux_resume_one_lwp. */
4293 if (errno == ESRCH)
4294 return;
4295
4296 if ((*the_low_target.cannot_store_register) (regno) == 0)
4297 error ("writing register %d: %s", regno, strerror (errno));
4298 }
4299 regaddr += sizeof (PTRACE_XFER_TYPE);
4300 }
4301 }
4302
4303 /* Fetch all registers, or just one, from the child process.
4304 If REGNO is -1, do this for all registers, skipping any that are
4305 assumed to have been retrieved by regsets_fetch_inferior_registers,
4306 unless ALL is non-zero.
4307 Otherwise, REGNO specifies which register (so we can save time). */
4308 static void
4309 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4310 {
4311 if (regno == -1)
4312 {
4313 for (regno = 0; regno < the_low_target.num_regs; regno++)
4314 if (all || !linux_register_in_regsets (regno))
4315 fetch_register (regcache, regno);
4316 }
4317 else
4318 fetch_register (regcache, regno);
4319 }
4320
4321 /* Store our register values back into the inferior.
4322 If REGNO is -1, do this for all registers, skipping any that are
4323 assumed to have been saved by regsets_store_inferior_registers,
4324 unless ALL is non-zero.
4325 Otherwise, REGNO specifies which register (so we can save time). */
4326 static void
4327 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4328 {
4329 if (regno == -1)
4330 {
4331 for (regno = 0; regno < the_low_target.num_regs; regno++)
4332 if (all || !linux_register_in_regsets (regno))
4333 store_register (regcache, regno);
4334 }
4335 else
4336 store_register (regcache, regno);
4337 }
4338
4339 #else /* !HAVE_LINUX_USRREGS */
4340
4341 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4342 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4343
4344 #endif
4345
4346
4347 void
4348 linux_fetch_registers (struct regcache *regcache, int regno)
4349 {
4350 int use_regsets;
4351 int all = 0;
4352
4353 if (regno == -1)
4354 {
4355 if (the_low_target.fetch_register != NULL)
4356 for (regno = 0; regno < the_low_target.num_regs; regno++)
4357 (*the_low_target.fetch_register) (regcache, regno);
4358
4359 all = regsets_fetch_inferior_registers (regcache);
4360 usr_fetch_inferior_registers (regcache, -1, all);
4361 }
4362 else
4363 {
4364 if (the_low_target.fetch_register != NULL
4365 && (*the_low_target.fetch_register) (regcache, regno))
4366 return;
4367
4368 use_regsets = linux_register_in_regsets (regno);
4369 if (use_regsets)
4370 all = regsets_fetch_inferior_registers (regcache);
4371 if (!use_regsets || all)
4372 usr_fetch_inferior_registers (regcache, regno, 1);
4373 }
4374 }
4375
4376 void
4377 linux_store_registers (struct regcache *regcache, int regno)
4378 {
4379 int use_regsets;
4380 int all = 0;
4381
4382 if (regno == -1)
4383 {
4384 all = regsets_store_inferior_registers (regcache);
4385 usr_store_inferior_registers (regcache, regno, all);
4386 }
4387 else
4388 {
4389 use_regsets = linux_register_in_regsets (regno);
4390 if (use_regsets)
4391 all = regsets_store_inferior_registers (regcache);
4392 if (!use_regsets || all)
4393 usr_store_inferior_registers (regcache, regno, 1);
4394 }
4395 }
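/* Editorial note: both entry points above try the regset interface
   first and fall back to the one-register-at-a-time PTRACE_PEEKUSER /
   PTRACE_POKEUSER path -- for registers no regset covers, for
   everything when no regset supplied the general registers (ALL comes
   back nonzero), or for a single requested register that lives
   outside the regsets.  */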
4396
4397
4398 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4399 to debugger memory starting at MYADDR. */
4400
4401 static int
4402 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4403 {
4404 int pid = lwpid_of (get_thread_lwp (current_inferior));
4405 register PTRACE_XFER_TYPE *buffer;
4406 register CORE_ADDR addr;
4407 register int count;
4408 char filename[64];
4409 register int i;
4410 int ret;
4411 int fd;
4412
4413 /* Try using /proc. Don't bother for one word. */
4414 if (len >= 3 * sizeof (long))
4415 {
4416 int bytes;
4417
4418 /* We could keep this file open and cache it - possibly one per
4419 thread. That requires some juggling, but is even faster. */
4420 sprintf (filename, "/proc/%d/mem", pid);
4421 fd = open (filename, O_RDONLY | O_LARGEFILE);
4422 if (fd == -1)
4423 goto no_proc;
4424
4425 /* If pread64 is available, use it. It's faster if the kernel
4426 supports it (only one syscall), and it's 64-bit safe even on
4427 32-bit platforms (for instance, SPARC debugging a SPARC64
4428 application). */
4429 #ifdef HAVE_PREAD64
4430 bytes = pread64 (fd, myaddr, len, memaddr);
4431 #else
4432 bytes = -1;
4433 if (lseek (fd, memaddr, SEEK_SET) != -1)
4434 bytes = read (fd, myaddr, len);
4435 #endif
4436
4437 close (fd);
4438 if (bytes == len)
4439 return 0;
4440
4441 /* Some data was read; we'll try to get the rest with ptrace. */
4442 if (bytes > 0)
4443 {
4444 memaddr += bytes;
4445 myaddr += bytes;
4446 len -= bytes;
4447 }
4448 }
4449
4450 no_proc:
4451 /* Round starting address down to longword boundary. */
4452 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4453 /* Round ending address up; get number of longwords that makes. */
4454 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4455 / sizeof (PTRACE_XFER_TYPE));
4456 /* Allocate buffer of that many longwords. */
4457 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4458
4459 /* Read all the longwords. */
4460 errno = 0;
4461 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4462 {
4463 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4464 about coercing an 8 byte integer to a 4 byte pointer. */
4465 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4466 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4467 (PTRACE_ARG4_TYPE) 0);
4468 if (errno)
4469 break;
4470 }
4471 ret = errno;
4472
4473 /* Copy appropriate bytes out of the buffer. */
4474 if (i > 0)
4475 {
4476 i *= sizeof (PTRACE_XFER_TYPE);
4477 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4478 memcpy (myaddr,
4479 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4480 i < len ? i : len);
4481 }
4482
4483 return ret;
4484 }
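/* Worked example of the fallback alignment above (editorial
   illustration): with 4-byte transfer words, MEMADDR == 0x1003 and
   LEN == 6 give

     addr  = 0x1003 & ~3                     = 0x1000
     count = ((0x1009 - 0x1000) + 4 - 1) / 4 = 3

   so three PTRACE_PEEKTEXT words cover 0x1000..0x100b, and the copy
   back to MYADDR starts at offset 0x1003 & 3 == 3 into the buffer.  */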
4485
4486 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4487 memory at MEMADDR. On failure (cannot write to the inferior)
4488 returns the value of errno. Always succeeds if LEN is zero. */
4489
4490 static int
4491 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4492 {
4493 register int i;
4494 /* Round starting address down to longword boundary. */
4495 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4496 /* Round ending address up; get number of longwords that makes. */
4497 register int count
4498 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4499 / sizeof (PTRACE_XFER_TYPE);
4500
4501 /* Allocate buffer of that many longwords. */
4502 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4503 alloca (count * sizeof (PTRACE_XFER_TYPE));
4504
4505 int pid = lwpid_of (get_thread_lwp (current_inferior));
4506
4507 if (len == 0)
4508 {
4509 /* Zero length write always succeeds. */
4510 return 0;
4511 }
4512
4513 if (debug_threads)
4514 {
4515 /* Dump up to four bytes. */
4516 unsigned int val = * (unsigned int *) myaddr;
4517 if (len == 1)
4518 val = val & 0xff;
4519 else if (len == 2)
4520 val = val & 0xffff;
4521 else if (len == 3)
4522 val = val & 0xffffff;
4523 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4524 val, (long)memaddr);
4525 }
4526
4527 /* Fill start and end extra bytes of buffer with existing memory data. */
4528
4529 errno = 0;
4530 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4531 about coercing an 8 byte integer to a 4 byte pointer. */
4532 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4533 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4534 (PTRACE_ARG4_TYPE) 0);
4535 if (errno)
4536 return errno;
4537
4538 if (count > 1)
4539 {
4540 errno = 0;
4541 buffer[count - 1]
4542 = ptrace (PTRACE_PEEKTEXT, pid,
4543 /* Coerce to a uintptr_t first to avoid potential gcc warning
4544 about coercing an 8 byte integer to a 4 byte pointer. */
4545 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4546 * sizeof (PTRACE_XFER_TYPE)),
4547 (PTRACE_ARG4_TYPE) 0);
4548 if (errno)
4549 return errno;
4550 }
4551
4552 /* Copy data to be written over corresponding part of buffer. */
4553
4554 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4555 myaddr, len);
4556
4557 /* Write the entire buffer. */
4558
4559 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4560 {
4561 errno = 0;
4562 ptrace (PTRACE_POKETEXT, pid,
4563 /* Coerce to a uintptr_t first to avoid potential gcc warning
4564 about coercing an 8 byte integer to a 4 byte pointer. */
4565 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4566 (PTRACE_ARG4_TYPE) buffer[i]);
4567 if (errno)
4568 return errno;
4569 }
4570
4571 return 0;
4572 }
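/* The same alignment drives the read-modify-write above (editorial
   illustration): writing 6 bytes at 0x1003 with 4-byte words peeks
   the first word (0x1000) and the last word (0x1008) so that the
   bytes at 0x1000..0x1002 and 0x1009..0x100b survive, overlays the
   6 payload bytes at offset 3, then pokes all three words back.

   A minimal sketch of the same technique for one unaligned byte
   follows; it is editorial, uses hypothetical names, and elides the
   PTRACE_ARG coercions used above.  */
#if 0
static int
poke_one_byte_sketch (int pid, unsigned long memaddr, unsigned char val)
{
  unsigned long addr = memaddr & -(unsigned long) sizeof (long);
  union { long word; unsigned char bytes[sizeof (long)]; } u;

  errno = 0;
  u.word = ptrace (PTRACE_PEEKTEXT, pid, addr, 0);	/* Read whole word.  */
  if (errno)
    return errno;
  u.bytes[memaddr - addr] = val;			/* Patch one byte.  */
  errno = 0;
  ptrace (PTRACE_POKETEXT, pid, addr, u.word);		/* Write word back.  */
  return errno;
}
#endif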
4573
4574 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4575 static int linux_supports_tracefork_flag;
4576
4577 static void
4578 linux_enable_event_reporting (int pid)
4579 {
4580 if (!linux_supports_tracefork_flag)
4581 return;
4582
4583 ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_ARG3_TYPE) 0,
4584 (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4585 }
4586
4587 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4588
4589 static int
4590 linux_tracefork_grandchild (void *arg)
4591 {
4592 _exit (0);
4593 }
4594
4595 #define STACK_SIZE 4096
4596
4597 static int
4598 linux_tracefork_child (void *arg)
4599 {
4600 ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
4601 kill (getpid (), SIGSTOP);
4602
4603 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4604
4605 if (fork () == 0)
4606 linux_tracefork_grandchild (NULL);
4607
4608 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4609
4610 #ifdef __ia64__
4611 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4612 CLONE_VM | SIGCHLD, NULL);
4613 #else
4614 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4615 CLONE_VM | SIGCHLD, NULL);
4616 #endif
4617
4618 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4619
4620 _exit (0);
4621 }
4622
4623 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4624 sure that we can enable the option, and that it had the desired
4625 effect. */
4626
4627 static void
4628 linux_test_for_tracefork (void)
4629 {
4630 int child_pid, ret, status;
4631 long second_pid;
4632 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4633 char *stack = xmalloc (STACK_SIZE * 4);
4634 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4635
4636 linux_supports_tracefork_flag = 0;
4637
4638 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4639
4640 child_pid = fork ();
4641 if (child_pid == 0)
4642 linux_tracefork_child (NULL);
4643
4644 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4645
4646 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4647 #ifdef __ia64__
4648 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4649 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4650 #else /* !__ia64__ */
4651 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4652 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4653 #endif /* !__ia64__ */
4654
4655 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4656
4657 if (child_pid == -1)
4658 perror_with_name ("clone");
4659
4660 ret = my_waitpid (child_pid, &status, 0);
4661 if (ret == -1)
4662 perror_with_name ("waitpid");
4663 else if (ret != child_pid)
4664 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4665 if (! WIFSTOPPED (status))
4666 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4667
4668 ret = ptrace (PTRACE_SETOPTIONS, child_pid, (PTRACE_ARG3_TYPE) 0,
4669 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4670 if (ret != 0)
4671 {
4672 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4673 (PTRACE_ARG4_TYPE) 0);
4674 if (ret != 0)
4675 {
4676 warning ("linux_test_for_tracefork: failed to kill child");
4677 return;
4678 }
4679
4680 ret = my_waitpid (child_pid, &status, 0);
4681 if (ret != child_pid)
4682 warning ("linux_test_for_tracefork: failed to wait for killed child");
4683 else if (!WIFSIGNALED (status))
4684 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4685 "killed child", status);
4686
4687 return;
4688 }
4689
4690 ret = ptrace (PTRACE_CONT, child_pid, (PTRACE_ARG3_TYPE) 0,
4691 (PTRACE_ARG4_TYPE) 0);
4692 if (ret != 0)
4693 warning ("linux_test_for_tracefork: failed to resume child");
4694
4695 ret = my_waitpid (child_pid, &status, 0);
4696
4697 if (ret == child_pid && WIFSTOPPED (status)
4698 && status >> 16 == PTRACE_EVENT_FORK)
4699 {
4700 second_pid = 0;
4701 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, (PTRACE_ARG3_TYPE) 0,
4702 &second_pid);
4703 if (ret == 0 && second_pid != 0)
4704 {
4705 int second_status;
4706
4707 linux_supports_tracefork_flag = 1;
4708 my_waitpid (second_pid, &second_status, 0);
4709 ret = ptrace (PTRACE_KILL, second_pid, (PTRACE_ARG3_TYPE) 0,
4710 (PTRACE_ARG4_TYPE) 0);
4711 if (ret != 0)
4712 warning ("linux_test_for_tracefork: failed to kill second child");
4713 my_waitpid (second_pid, &status, 0);
4714 }
4715 }
4716 else
4717 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4718 "(%d, status 0x%x)", ret, status);
4719
4720 do
4721 {
4722 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4723 (PTRACE_ARG4_TYPE) 0);
4724 if (ret != 0)
4725 warning ("linux_test_for_tracefork: failed to kill child");
4726 my_waitpid (child_pid, &status, 0);
4727 }
4728 while (WIFSTOPPED (status));
4729
4730 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4731 free (stack);
4732 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4733 }
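/* Note on the probe above (editorial): a traced fork is reported as a
   stop whose upper status bits carry the event number, which is what

     status >> 16 == PTRACE_EVENT_FORK

   matches; PTRACE_GETEVENTMSG then yields the new child's PID so that
   both processes can be waited for and killed.  */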
4734
4735
4736 static void
4737 linux_look_up_symbols (void)
4738 {
4739 #ifdef USE_THREAD_DB
4740 struct process_info *proc = current_process ();
4741
4742 if (proc->private->thread_db != NULL)
4743 return;
4744
4745 /* If the kernel supports tracing forks then it also supports tracing
4746 clones, in which case we don't need the magic thread event breakpoint
4747 to learn about threads. */
4748 thread_db_init (!linux_supports_tracefork_flag);
4749 #endif
4750 }
4751
4752 static void
4753 linux_request_interrupt (void)
4754 {
4755 extern unsigned long signal_pid;
4756
4757 if (!ptid_equal (cont_thread, null_ptid)
4758 && !ptid_equal (cont_thread, minus_one_ptid))
4759 {
4760 struct lwp_info *lwp;
4761 int lwpid;
4762
4763 lwp = get_thread_lwp (current_inferior);
4764 lwpid = lwpid_of (lwp);
4765 kill_lwp (lwpid, SIGINT);
4766 }
4767 else
4768 kill_lwp (signal_pid, SIGINT);
4769 }
4770
4771 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4772 to debugger memory starting at MYADDR. */
4773
4774 static int
4775 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4776 {
4777 char filename[PATH_MAX];
4778 int fd, n;
4779 int pid = lwpid_of (get_thread_lwp (current_inferior));
4780
4781 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4782
4783 fd = open (filename, O_RDONLY);
4784 if (fd < 0)
4785 return -1;
4786
4787 if (offset != (CORE_ADDR) 0
4788 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4789 n = -1;
4790 else
4791 n = read (fd, myaddr, len);
4792
4793 close (fd);
4794
4795 return n;
4796 }
4797
4798 /* These breakpoint and watchpoint related wrapper functions simply
4799 pass on the function call if the target has registered a
4800 corresponding function. */
4801
4802 static int
4803 linux_insert_point (char type, CORE_ADDR addr, int len)
4804 {
4805 if (the_low_target.insert_point != NULL)
4806 return the_low_target.insert_point (type, addr, len);
4807 else
4808 /* Unsupported (see target.h). */
4809 return 1;
4810 }
4811
4812 static int
4813 linux_remove_point (char type, CORE_ADDR addr, int len)
4814 {
4815 if (the_low_target.remove_point != NULL)
4816 return the_low_target.remove_point (type, addr, len);
4817 else
4818 /* Unsupported (see target.h). */
4819 return 1;
4820 }
4821
4822 static int
4823 linux_stopped_by_watchpoint (void)
4824 {
4825 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4826
4827 return lwp->stopped_by_watchpoint;
4828 }
4829
4830 static CORE_ADDR
4831 linux_stopped_data_address (void)
4832 {
4833 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4834
4835 return lwp->stopped_data_address;
4836 }
4837
4838 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4839 #if ! (defined(PT_TEXT_ADDR) \
4840 || defined(PT_DATA_ADDR) \
4841 || defined(PT_TEXT_END_ADDR))
4842 #if defined(__mcoldfire__)
4843 /* These should really be defined in the kernel's ptrace.h header. */
4844 #define PT_TEXT_ADDR 49*4
4845 #define PT_DATA_ADDR 50*4
4846 #define PT_TEXT_END_ADDR 51*4
4847 #elif defined(BFIN)
4848 #define PT_TEXT_ADDR 220
4849 #define PT_TEXT_END_ADDR 224
4850 #define PT_DATA_ADDR 228
4851 #elif defined(__TMS320C6X__)
4852 #define PT_TEXT_ADDR (0x10000*4)
4853 #define PT_DATA_ADDR (0x10004*4)
4854 #define PT_TEXT_END_ADDR (0x10008*4)
4855 #endif
4856 #endif
4857
4858 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4859 to tell gdb about. */
4860
4861 static int
4862 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4863 {
4864 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4865 unsigned long text, text_end, data;
4866 int pid = lwpid_of (get_thread_lwp (current_inferior));
4867
4868 errno = 0;
4869
4870 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_ADDR,
4871 (PTRACE_ARG4_TYPE) 0);
4872 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_END_ADDR,
4873 (PTRACE_ARG4_TYPE) 0);
4874 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_DATA_ADDR,
4875 (PTRACE_ARG4_TYPE) 0);
4876
4877 if (errno == 0)
4878 {
4879 /* Both text and data offsets produced at compile-time (and so
4880 used by gdb) are relative to the beginning of the program,
4881 with the data segment immediately following the text segment.
4882 However, the actual runtime layout in memory may put the data
4883 somewhere else, so when we send gdb a data base-address, we
4884 use the real data base address and subtract the compile-time
4885 data base-address from it (which is just the length of the
4886 text segment). BSS immediately follows data in both
4887 cases. */
4888 *text_p = text;
4889 *data_p = data - (text_end - text);
4890
4891 return 1;
4892 }
4893 #endif
4894 return 0;
4895 }
4896 #endif
4897
4898 static int
4899 linux_qxfer_osdata (const char *annex,
4900 unsigned char *readbuf, unsigned const char *writebuf,
4901 CORE_ADDR offset, int len)
4902 {
4903 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4904 }
4905
4906 /* Convert a native/host siginfo object into/from the siginfo in the
4907 layout of the inferior's architecture. */
4908
4909 static void
4910 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4911 {
4912 int done = 0;
4913
4914 if (the_low_target.siginfo_fixup != NULL)
4915 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4916
4917 /* If there was no callback, or the callback didn't do anything,
4918 then just do a straight memcpy. */
4919 if (!done)
4920 {
4921 if (direction == 1)
4922 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4923 else
4924 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4925 }
4926 }
4927
4928 static int
4929 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4930 unsigned const char *writebuf, CORE_ADDR offset, int len)
4931 {
4932 int pid;
4933 siginfo_t siginfo;
4934 char inf_siginfo[sizeof (siginfo_t)];
4935
4936 if (current_inferior == NULL)
4937 return -1;
4938
4939 pid = lwpid_of (get_thread_lwp (current_inferior));
4940
4941 if (debug_threads)
4942 fprintf (stderr, "%s siginfo for lwp %d.\n",
4943 readbuf != NULL ? "Reading" : "Writing",
4944 pid);
4945
4946 if (offset >= sizeof (siginfo))
4947 return -1;
4948
4949 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4950 return -1;
4951
4952 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4953 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4954 inferior with a 64-bit GDBSERVER should look the same as debugging it
4955 with a 32-bit GDBSERVER, we need to convert it. */
4956 siginfo_fixup (&siginfo, inf_siginfo, 0);
4957
4958 if (offset + len > sizeof (siginfo))
4959 len = sizeof (siginfo) - offset;
4960
4961 if (readbuf != NULL)
4962 memcpy (readbuf, inf_siginfo + offset, len);
4963 else
4964 {
4965 memcpy (inf_siginfo + offset, writebuf, len);
4966
4967 /* Convert back to ptrace layout before flushing it out. */
4968 siginfo_fixup (&siginfo, inf_siginfo, 1);
4969
4970 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
4971 return -1;
4972 }
4973
4974 return len;
4975 }
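/* Example of the clamping above (editorial illustration, assuming the
   usual 128-byte Linux siginfo_t): a request with OFFSET == 120 and
   LEN == 32 is clamped to LEN == 8, while any OFFSET >= 128 fails
   with -1 before PTRACE_GETSIGINFO is ever issued.  */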
4976
4977 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4978 it lets us notice when children change state; and it acts as the
4979 handler for the sigsuspend in my_waitpid. */
4980
4981 static void
4982 sigchld_handler (int signo)
4983 {
4984 int old_errno = errno;
4985
4986 if (debug_threads)
4987 {
4988 do
4989 {
4990 /* fprintf is not async-signal-safe, so call write
4991 directly. */
4992 if (write (2, "sigchld_handler\n",
4993 sizeof ("sigchld_handler\n") - 1) < 0)
4994 break; /* just ignore */
4995 } while (0);
4996 }
4997
4998 if (target_is_async_p ())
4999 async_file_mark (); /* trigger a linux_wait */
5000
5001 errno = old_errno;
5002 }
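/* Editorial illustration (not part of the original file): the classic
   self-pipe trick that async_file_mark above and linux_async below
   implement.  A minimal, self-contained sketch; the names below are
   hypothetical.  */
#if 0
static int self_pipe[2];	/* Both ends set O_NONBLOCK.  */

static void
sketch_sigchld_handler (int signo)
{
  /* write is async-signal-safe; fprintf and friends are not.  */
  if (write (self_pipe[1], "+", 1) < 0)
    ;				/* Pipe full: a wakeup is already queued.  */
}

/* The event loop polls self_pipe[0]; when it becomes readable, it
   drains the pipe and calls the waitpid-based event handler.  */
#endif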
5003
5004 static int
5005 linux_supports_non_stop (void)
5006 {
5007 return 1;
5008 }
5009
5010 static int
5011 linux_async (int enable)
5012 {
5013 int previous = (linux_event_pipe[0] != -1);
5014
5015 if (debug_threads)
5016 fprintf (stderr, "linux_async (%d), previous=%d\n",
5017 enable, previous);
5018
5019 if (previous != enable)
5020 {
5021 sigset_t mask;
5022 sigemptyset (&mask);
5023 sigaddset (&mask, SIGCHLD);
5024
5025 sigprocmask (SIG_BLOCK, &mask, NULL);
5026
5027 if (enable)
5028 {
5029 if (pipe (linux_event_pipe) == -1)
5030 fatal ("creating event pipe failed.");
5031
5032 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5033 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5034
5035 /* Register the event loop handler. */
5036 add_file_handler (linux_event_pipe[0],
5037 handle_target_event, NULL);
5038
5039 /* Always trigger a linux_wait. */
5040 async_file_mark ();
5041 }
5042 else
5043 {
5044 delete_file_handler (linux_event_pipe[0]);
5045
5046 close (linux_event_pipe[0]);
5047 close (linux_event_pipe[1]);
5048 linux_event_pipe[0] = -1;
5049 linux_event_pipe[1] = -1;
5050 }
5051
5052 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5053 }
5054
5055 return previous;
5056 }
5057
5058 static int
5059 linux_start_non_stop (int nonstop)
5060 {
5061 /* Register or unregister from event-loop accordingly. */
5062 linux_async (nonstop);
5063 return 0;
5064 }
5065
5066 static int
5067 linux_supports_multi_process (void)
5068 {
5069 return 1;
5070 }
5071
5072 static int
5073 linux_supports_disable_randomization (void)
5074 {
5075 #ifdef HAVE_PERSONALITY
5076 return 1;
5077 #else
5078 return 0;
5079 #endif
5080 }
5081
5082 static int
5083 linux_supports_agent (void)
5084 {
5085 return 1;
5086 }
5087
5088 /* Enumerate spufs IDs for process PID. */
5089 static int
5090 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5091 {
5092 int pos = 0;
5093 int written = 0;
5094 char path[128];
5095 DIR *dir;
5096 struct dirent *entry;
5097
5098 sprintf (path, "/proc/%ld/fd", pid);
5099 dir = opendir (path);
5100 if (!dir)
5101 return -1;
5102
5103 rewinddir (dir);
5104 while ((entry = readdir (dir)) != NULL)
5105 {
5106 struct stat st;
5107 struct statfs stfs;
5108 int fd;
5109
5110 fd = atoi (entry->d_name);
5111 if (!fd)
5112 continue;
5113
5114 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5115 if (stat (path, &st) != 0)
5116 continue;
5117 if (!S_ISDIR (st.st_mode))
5118 continue;
5119
5120 if (statfs (path, &stfs) != 0)
5121 continue;
5122 if (stfs.f_type != SPUFS_MAGIC)
5123 continue;
5124
5125 if (pos >= offset && pos + 4 <= offset + len)
5126 {
5127 *(unsigned int *)(buf + pos - offset) = fd;
5128 written += 4;
5129 }
5130 pos += 4;
5131 }
5132
5133 closedir (dir);
5134 return written;
5135 }
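/* Worked example of the OFFSET/LEN windowing above (editorial
   illustration): each matching fd occupies 4 bytes of the virtual
   stream, so with OFFSET == 4 and LEN == 8 only the entries at
   pos == 4 and pos == 8 satisfy

     pos >= offset && pos + 4 <= offset + len

   and WRITTEN comes back as 8.  */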
5136
5137 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5138 object type, using the /proc file system. */
5139 static int
5140 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5141 unsigned const char *writebuf,
5142 CORE_ADDR offset, int len)
5143 {
5144 long pid = lwpid_of (get_thread_lwp (current_inferior));
5145 char buf[128];
5146 int fd = 0;
5147 int ret = 0;
5148
5149 if (!writebuf && !readbuf)
5150 return -1;
5151
5152 if (!*annex)
5153 {
5154 if (!readbuf)
5155 return -1;
5156 else
5157 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5158 }
5159
5160 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5161 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5162 if (fd <= 0)
5163 return -1;
5164
5165 if (offset != 0
5166 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5167 {
5168 close (fd);
5169 return 0;
5170 }
5171
5172 if (writebuf)
5173 ret = write (fd, writebuf, (size_t) len);
5174 else
5175 ret = read (fd, readbuf, (size_t) len);
5176
5177 close (fd);
5178 return ret;
5179 }
5180
5181 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5182 struct target_loadseg
5183 {
5184 /* Core address to which the segment is mapped. */
5185 Elf32_Addr addr;
5186 /* VMA recorded in the program header. */
5187 Elf32_Addr p_vaddr;
5188 /* Size of this segment in memory. */
5189 Elf32_Word p_memsz;
5190 };
5191
5192 # if defined PT_GETDSBT
5193 struct target_loadmap
5194 {
5195 /* Protocol version number, must be zero. */
5196 Elf32_Word version;
5197 /* Pointer to the DSBT table, its size, and the DSBT index. */
5198 unsigned *dsbt_table;
5199 unsigned dsbt_size, dsbt_index;
5200 /* Number of segments in this map. */
5201 Elf32_Word nsegs;
5202 /* The actual memory map. */
5203 struct target_loadseg segs[/*nsegs*/];
5204 };
5205 # define LINUX_LOADMAP PT_GETDSBT
5206 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5207 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5208 # else
5209 struct target_loadmap
5210 {
5211 /* Protocol version number, must be zero. */
5212 Elf32_Half version;
5213 /* Number of segments in this map. */
5214 Elf32_Half nsegs;
5215 /* The actual memory map. */
5216 struct target_loadseg segs[/*nsegs*/];
5217 };
5218 # define LINUX_LOADMAP PTRACE_GETFDPIC
5219 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5220 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5221 # endif
5222
5223 static int
5224 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5225 unsigned char *myaddr, unsigned int len)
5226 {
5227 int pid = lwpid_of (get_thread_lwp (current_inferior));
5228 int addr = -1;
5229 struct target_loadmap *data = NULL;
5230 unsigned int actual_length, copy_length;
5231
5232 if (strcmp (annex, "exec") == 0)
5233 addr = (int) LINUX_LOADMAP_EXEC;
5234 else if (strcmp (annex, "interp") == 0)
5235 addr = (int) LINUX_LOADMAP_INTERP;
5236 else
5237 return -1;
5238
5239 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5240 return -1;
5241
5242 if (data == NULL)
5243 return -1;
5244
5245 actual_length = sizeof (struct target_loadmap)
5246 + sizeof (struct target_loadseg) * data->nsegs;
5247
5248 if (offset < 0 || offset > actual_length)
5249 return -1;
5250
5251 copy_length = actual_length - offset < len ? actual_length - offset : len;
5252 memcpy (myaddr, (char *) data + offset, copy_length);
5253 return copy_length;
5254 }
5255 #else
5256 # define linux_read_loadmap NULL
5257 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5258
5259 static void
5260 linux_process_qsupported (const char *query)
5261 {
5262 if (the_low_target.process_qsupported != NULL)
5263 the_low_target.process_qsupported (query);
5264 }
5265
5266 static int
5267 linux_supports_tracepoints (void)
5268 {
5269 if (the_low_target.supports_tracepoints == NULL)
5270 return 0;
5271
5272 return (*the_low_target.supports_tracepoints) ();
5273 }
5274
5275 static CORE_ADDR
5276 linux_read_pc (struct regcache *regcache)
5277 {
5278 if (the_low_target.get_pc == NULL)
5279 return 0;
5280
5281 return (*the_low_target.get_pc) (regcache);
5282 }
5283
5284 static void
5285 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5286 {
5287 gdb_assert (the_low_target.set_pc != NULL);
5288
5289 (*the_low_target.set_pc) (regcache, pc);
5290 }
5291
5292 static int
5293 linux_thread_stopped (struct thread_info *thread)
5294 {
5295 return get_thread_lwp (thread)->stopped;
5296 }
5297
5298 /* This exposes stop-all-threads functionality to other modules. */
5299
5300 static void
5301 linux_pause_all (int freeze)
5302 {
5303 stop_all_lwps (freeze, NULL);
5304 }
5305
5306 /* This exposes unstop-all-threads functionality to other gdbserver
5307 modules. */
5308
5309 static void
5310 linux_unpause_all (int unfreeze)
5311 {
5312 unstop_all_lwps (unfreeze, NULL);
5313 }
5314
5315 static int
5316 linux_prepare_to_access_memory (void)
5317 {
5318 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5319 running LWP. */
5320 if (non_stop)
5321 linux_pause_all (1);
5322 return 0;
5323 }
5324
5325 static void
5326 linux_done_accessing_memory (void)
5327 {
5328 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5329 running LWP. */
5330 if (non_stop)
5331 linux_unpause_all (1);
5332 }
5333
5334 static int
5335 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5336 CORE_ADDR collector,
5337 CORE_ADDR lockaddr,
5338 ULONGEST orig_size,
5339 CORE_ADDR *jump_entry,
5340 CORE_ADDR *trampoline,
5341 ULONGEST *trampoline_size,
5342 unsigned char *jjump_pad_insn,
5343 ULONGEST *jjump_pad_insn_size,
5344 CORE_ADDR *adjusted_insn_addr,
5345 CORE_ADDR *adjusted_insn_addr_end,
5346 char *err)
5347 {
5348 return (*the_low_target.install_fast_tracepoint_jump_pad)
5349 (tpoint, tpaddr, collector, lockaddr, orig_size,
5350 jump_entry, trampoline, trampoline_size,
5351 jjump_pad_insn, jjump_pad_insn_size,
5352 adjusted_insn_addr, adjusted_insn_addr_end,
5353 err);
5354 }
5355
5356 static struct emit_ops *
5357 linux_emit_ops (void)
5358 {
5359 if (the_low_target.emit_ops != NULL)
5360 return (*the_low_target.emit_ops) ();
5361 else
5362 return NULL;
5363 }
5364
5365 static int
5366 linux_get_min_fast_tracepoint_insn_len (void)
5367 {
5368 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5369 }
5370
5371 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5372
5373 static int
5374 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5375 CORE_ADDR *phdr_memaddr, int *num_phdr)
5376 {
5377 char filename[PATH_MAX];
5378 int fd;
5379 const int auxv_size = is_elf64
5380 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5381 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5382
5383 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5384
5385 fd = open (filename, O_RDONLY);
5386 if (fd < 0)
5387 return 1;
5388
5389 *phdr_memaddr = 0;
5390 *num_phdr = 0;
5391 while (read (fd, buf, auxv_size) == auxv_size
5392 && (*phdr_memaddr == 0 || *num_phdr == 0))
5393 {
5394 if (is_elf64)
5395 {
5396 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5397
5398 switch (aux->a_type)
5399 {
5400 case AT_PHDR:
5401 *phdr_memaddr = aux->a_un.a_val;
5402 break;
5403 case AT_PHNUM:
5404 *num_phdr = aux->a_un.a_val;
5405 break;
5406 }
5407 }
5408 else
5409 {
5410 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5411
5412 switch (aux->a_type)
5413 {
5414 case AT_PHDR:
5415 *phdr_memaddr = aux->a_un.a_val;
5416 break;
5417 case AT_PHNUM:
5418 *num_phdr = aux->a_un.a_val;
5419 break;
5420 }
5421 }
5422 }
5423
5424 close (fd);
5425
5426 if (*phdr_memaddr == 0 || *num_phdr == 0)
5427 {
5428 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5429 "phdr_memaddr = %ld, phdr_num = %d",
5430 (long) *phdr_memaddr, *num_phdr);
5431 return 2;
5432 }
5433
5434 return 0;
5435 }
5436
5437 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5438
5439 static CORE_ADDR
5440 get_dynamic (const int pid, const int is_elf64)
5441 {
5442 CORE_ADDR phdr_memaddr, relocation;
5443 int num_phdr, i;
5444 unsigned char *phdr_buf;
5445 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5446
5447 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5448 return 0;
5449
5450 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5451 phdr_buf = alloca (num_phdr * phdr_size);
5452
5453 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5454 return 0;
5455
5456 /* Compute relocation: it is expected to be 0 for "regular" executables,
5457 non-zero for PIE ones. */
5458 relocation = -1;
5459 for (i = 0; relocation == -1 && i < num_phdr; i++)
5460 if (is_elf64)
5461 {
5462 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5463
5464 if (p->p_type == PT_PHDR)
5465 relocation = phdr_memaddr - p->p_vaddr;
5466 }
5467 else
5468 {
5469 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5470
5471 if (p->p_type == PT_PHDR)
5472 relocation = phdr_memaddr - p->p_vaddr;
5473 }
5474
5475 if (relocation == -1)
5476 {
5477 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5478 all real-world executables, including PIE executables, always have
5479 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5480 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5481 provides DT_DEBUG anyway (fpc binaries are statically linked).
5482
5483 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
5484
5485 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
5486
5487 return 0;
5488 }
5489
5490 for (i = 0; i < num_phdr; i++)
5491 {
5492 if (is_elf64)
5493 {
5494 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5495
5496 if (p->p_type == PT_DYNAMIC)
5497 return p->p_vaddr + relocation;
5498 }
5499 else
5500 {
5501 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5502
5503 if (p->p_type == PT_DYNAMIC)
5504 return p->p_vaddr + relocation;
5505 }
5506 }
5507
5508 return 0;
5509 }
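/* Worked example of the relocation above (editorial illustration with
   made-up addresses): for a PIE whose PT_PHDR records
   p_vaddr == 0x40 while AT_PHDR reports 0x555555554040,

     relocation = 0x555555554040 - 0x40 = 0x555555554000

   and a PT_DYNAMIC with p_vaddr == 0x2de8 places &_DYNAMIC at
   0x555555556de8.  */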
5510
5511 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5512 can be 0 if the inferior does not yet have the library list initialized.
5513 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5514 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5515
5516 static CORE_ADDR
5517 get_r_debug (const int pid, const int is_elf64)
5518 {
5519 CORE_ADDR dynamic_memaddr;
5520 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5521 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5522 CORE_ADDR map = -1;
5523
5524 dynamic_memaddr = get_dynamic (pid, is_elf64);
5525 if (dynamic_memaddr == 0)
5526 return map;
5527
5528 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5529 {
5530 if (is_elf64)
5531 {
5532 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5533 #ifdef DT_MIPS_RLD_MAP
5534 union
5535 {
5536 Elf64_Xword map;
5537 unsigned char buf[sizeof (Elf64_Xword)];
5538 }
5539 rld_map;
5540
5541 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5542 {
5543 if (linux_read_memory (dyn->d_un.d_val,
5544 rld_map.buf, sizeof (rld_map.buf)) == 0)
5545 return rld_map.map;
5546 else
5547 break;
5548 }
5549 #endif /* DT_MIPS_RLD_MAP */
5550
5551 if (dyn->d_tag == DT_DEBUG && map == -1)
5552 map = dyn->d_un.d_val;
5553
5554 if (dyn->d_tag == DT_NULL)
5555 break;
5556 }
5557 else
5558 {
5559 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5560 #ifdef DT_MIPS_RLD_MAP
5561 union
5562 {
5563 Elf32_Word map;
5564 unsigned char buf[sizeof (Elf32_Word)];
5565 }
5566 rld_map;
5567
5568 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5569 {
5570 if (linux_read_memory (dyn->d_un.d_val,
5571 rld_map.buf, sizeof (rld_map.buf)) == 0)
5572 return rld_map.map;
5573 else
5574 break;
5575 }
5576 #endif /* DT_MIPS_RLD_MAP */
5577
5578 if (dyn->d_tag == DT_DEBUG && map == -1)
5579 map = dyn->d_un.d_val;
5580
5581 if (dyn->d_tag == DT_NULL)
5582 break;
5583 }
5584
5585 dynamic_memaddr += dyn_size;
5586 }
5587
5588 return map;
5589 }
5590
5591 /* Read one pointer from MEMADDR in the inferior. */
5592
5593 static int
5594 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5595 {
5596 int ret;
5597
5598 /* Go through a union so this works on either big or little endian
5599 hosts, when the inferior's pointer size is smaller than the size
5600 of CORE_ADDR. It is assumed the inferior's endianness is the
5601 same as the superior's. */
5602 union
5603 {
5604 CORE_ADDR core_addr;
5605 unsigned int ui;
5606 unsigned char uc;
5607 } addr;
5608
5609 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5610 if (ret == 0)
5611 {
5612 if (ptr_size == sizeof (CORE_ADDR))
5613 *ptr = addr.core_addr;
5614 else if (ptr_size == sizeof (unsigned int))
5615 *ptr = addr.ui;
5616 else
5617 gdb_assert_not_reached ("unhandled pointer size");
5618 }
5619 return ret;
5620 }
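/* Why the union above works (editorial note): linux_read_memory fills
   the first PTR_SIZE bytes of the union, and addr.ui overlays exactly
   those bytes when a 4-byte inferior pointer is read on a host whose
   CORE_ADDR is 8 bytes.  Because inferior and gdbserver share
   endianness, the value needs no byte swapping.  */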
5621
5622 struct link_map_offsets
5623 {
5624 /* Offset and size of r_debug.r_version. */
5625 int r_version_offset;
5626
5627 /* Offset and size of r_debug.r_map. */
5628 int r_map_offset;
5629
5630 /* Offset to l_addr field in struct link_map. */
5631 int l_addr_offset;
5632
5633 /* Offset to l_name field in struct link_map. */
5634 int l_name_offset;
5635
5636 /* Offset to l_ld field in struct link_map. */
5637 int l_ld_offset;
5638
5639 /* Offset to l_next field in struct link_map. */
5640 int l_next_offset;
5641
5642 /* Offset to l_prev field in struct link_map. */
5643 int l_prev_offset;
5644 };
5645
5646 /* Construct qXfer:libraries-svr4:read reply. */
5647
5648 static int
5649 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5650 unsigned const char *writebuf,
5651 CORE_ADDR offset, int len)
5652 {
5653 char *document;
5654 unsigned document_len;
5655 struct process_info_private *const priv = current_process ()->private;
5656 char filename[PATH_MAX];
5657 int pid, is_elf64;
5658
5659 static const struct link_map_offsets lmo_32bit_offsets =
5660 {
5661 0, /* r_version offset. */
5662 4, /* r_debug.r_map offset. */
5663 0, /* l_addr offset in link_map. */
5664 4, /* l_name offset in link_map. */
5665 8, /* l_ld offset in link_map. */
5666 12, /* l_next offset in link_map. */
5667 16 /* l_prev offset in link_map. */
5668 };
5669
5670 static const struct link_map_offsets lmo_64bit_offsets =
5671 {
5672 0, /* r_version offset. */
5673 8, /* r_debug.r_map offset. */
5674 0, /* l_addr offset in link_map. */
5675 8, /* l_name offset in link_map. */
5676 16, /* l_ld offset in link_map. */
5677 24, /* l_next offset in link_map. */
5678 32 /* l_prev offset in link_map. */
5679 };
5680 const struct link_map_offsets *lmo;
5681 unsigned int machine;
5682
5683 if (writebuf != NULL)
5684 return -2;
5685 if (readbuf == NULL)
5686 return -1;
5687
5688 pid = lwpid_of (get_thread_lwp (current_inferior));
5689 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5690 is_elf64 = elf_64_file_p (filename, &machine);
5691 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5692
5693 if (priv->r_debug == 0)
5694 priv->r_debug = get_r_debug (pid, is_elf64);
5695
5696 /* We failed to find DT_DEBUG. This situation will not change for this
5697 inferior, so do not retry it. Report it to GDB as E01; see the GDB
5698 solib-svr4.c side for the reasons. */
5699 if (priv->r_debug == (CORE_ADDR) -1)
5700 return -1;
5701
5702 if (priv->r_debug == 0)
5703 {
5704 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5705 }
5706 else
5707 {
5708 int allocated = 1024;
5709 char *p;
5710 const int ptr_size = is_elf64 ? 8 : 4;
5711 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5712 int r_version, header_done = 0;
5713
5714 document = xmalloc (allocated);
5715 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5716 p = document + strlen (document);
5717
5718 r_version = 0;
5719 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5720 (unsigned char *) &r_version,
5721 sizeof (r_version)) != 0
5722 || r_version != 1)
5723 {
5724 warning ("unexpected r_debug version %d", r_version);
5725 goto done;
5726 }
5727
5728 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5729 &lm_addr, ptr_size) != 0)
5730 {
5731 warning ("unable to read r_map from 0x%lx",
5732 (long) priv->r_debug + lmo->r_map_offset);
5733 goto done;
5734 }
5735
5736 lm_prev = 0;
5737 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5738 &l_name, ptr_size) == 0
5739 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5740 &l_addr, ptr_size) == 0
5741 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5742 &l_ld, ptr_size) == 0
5743 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5744 &l_prev, ptr_size) == 0
5745 && read_one_ptr (lm_addr + lmo->l_next_offset,
5746 &l_next, ptr_size) == 0)
5747 {
5748 unsigned char libname[PATH_MAX];
5749
5750 if (lm_prev != l_prev)
5751 {
5752 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5753 (long) lm_prev, (long) l_prev);
5754 break;
5755 }
5756
5757 /* Not checking for error because reading may stop before
5758 we've got PATH_MAX worth of characters. */
5759 libname[0] = '\0';
5760 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5761 libname[sizeof (libname) - 1] = '\0';
5762 if (libname[0] != '\0')
5763 {
5764 /* Allow 6x the size for xml_escape_text below ("&quot;" is the longest escape).  */
5765 size_t len = 6 * strlen ((char *) libname);
5766 char *name;
5767
5768 if (!header_done)
5769 {
5770 /* Terminate `<library-list-svr4'. */
5771 *p++ = '>';
5772 header_done = 1;
5773 }
5774
5775 while (allocated < p - document + len + 200)
5776 {
5777 /* Expand to guarantee sufficient storage. */
5778 uintptr_t document_len = p - document;
5779
5780 document = xrealloc (document, 2 * allocated);
5781 allocated *= 2;
5782 p = document + document_len;
5783 }
5784
5785 name = xml_escape_text ((char *) libname);
5786 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5787 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5788 name, (unsigned long) lm_addr,
5789 (unsigned long) l_addr, (unsigned long) l_ld);
5790 free (name);
5791 }
5792 else if (lm_prev == 0)
5793 {
5794 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5795 p = p + strlen (p);
5796 }
5797
5798 if (l_next == 0)
5799 break;
5800
5801 lm_prev = lm_addr;
5802 lm_addr = l_next;
5803 }
5804 done:
5805 if (!header_done)
5806 {
5807 /* Empty list; terminate `<library-list-svr4'. */
5808 strcpy (p, "/>");
5809 }
5810 else
5811 strcpy (p, "</library-list-svr4>");
5812 }
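  /* At this point DOCUMENT holds the complete XML text, for example
     (with made-up addresses):

	<library-list-svr4 version="1.0" main-lm="0x7ffff7ffd9f0">
	<library name="/lib64/libc.so.6" lm="0x7ffff7fd4000"
		 l_addr="0x7ffff7a1b000" l_ld="0x7ffff7dbdba0"/>
	</library-list-svr4>

     Below, only the LEN bytes starting at OFFSET are returned, so GDB
     can fetch a long list across several qXfer requests.  */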
5813
5814 document_len = strlen (document);
5815 if (offset < document_len)
5816 document_len -= offset;
5817 else
5818 document_len = 0;
5819 if (len > document_len)
5820 len = document_len;
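  /* For instance, with a 300-byte document a request for OFFSET 256,
     LEN 100 copies out the trailing 44 bytes and returns 44; any
     OFFSET at or past 300 returns 0, which GDB treats as EOF.  */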
5821
5822 memcpy (readbuf, document + offset, len);
5823 xfree (document);
5824
5825 return len;
5826 }
5827
5828 #ifdef HAVE_LINUX_BTRACE
5829
5830 /* Enable branch tracing. */
5831
5832 static struct btrace_target_info *
5833 linux_low_enable_btrace (ptid_t ptid)
5834 {
5835 struct btrace_target_info *tinfo;
5836
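  /* Delegate to the common linux-btrace layer, then record the
     inferior's pointer width in bits.  (Using register 0 here assumes
     the first register has the target's native word size.)  */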
5837 tinfo = linux_enable_btrace (ptid);
5838 if (tinfo != NULL)
5839 tinfo->ptr_bits = register_size (0) * 8;
5840
5841 return tinfo;
5842 }
5843
5844 /* Read branch trace data as a btrace XML document. */
5845
5846 static void
5847 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5848 int type)
5849 {
5850 VEC (btrace_block_s) *btrace;
5851 struct btrace_block *block;
5852 int i;
5853
5854 btrace = linux_read_btrace (tinfo, type);
5855
5856 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5857 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5858
5859 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5860 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5861 paddress (block->begin), paddress (block->end));
5862
5863 buffer_grow_str (buffer, "</btrace>\n");
5864
5865 VEC_free (btrace_block_s, btrace);
5866 }
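/* The resulting document looks like, for example (with made-up
   addresses):

	<!DOCTYPE btrace SYSTEM "btrace.dtd">
	<btrace version="1.0">
	<block begin="0x400530" end="0x400552"/>
	</btrace>  */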
5867 #endif /* HAVE_LINUX_BTRACE */
5868
5869 static struct target_ops linux_target_ops = {
5870 linux_create_inferior,
5871 linux_attach,
5872 linux_kill,
5873 linux_detach,
5874 linux_mourn,
5875 linux_join,
5876 linux_thread_alive,
5877 linux_resume,
5878 linux_wait,
5879 linux_fetch_registers,
5880 linux_store_registers,
5881 linux_prepare_to_access_memory,
5882 linux_done_accessing_memory,
5883 linux_read_memory,
5884 linux_write_memory,
5885 linux_look_up_symbols,
5886 linux_request_interrupt,
5887 linux_read_auxv,
5888 linux_insert_point,
5889 linux_remove_point,
5890 linux_stopped_by_watchpoint,
5891 linux_stopped_data_address,
5892 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5893 linux_read_offsets,
5894 #else
5895 NULL,
5896 #endif
5897 #ifdef USE_THREAD_DB
5898 thread_db_get_tls_address,
5899 #else
5900 NULL,
5901 #endif
5902 linux_qxfer_spu,
5903 hostio_last_error_from_errno,
5904 linux_qxfer_osdata,
5905 linux_xfer_siginfo,
5906 linux_supports_non_stop,
5907 linux_async,
5908 linux_start_non_stop,
5909 linux_supports_multi_process,
5910 #ifdef USE_THREAD_DB
5911 thread_db_handle_monitor_command,
5912 #else
5913 NULL,
5914 #endif
5915 linux_common_core_of_thread,
5916 linux_read_loadmap,
5917 linux_process_qsupported,
5918 linux_supports_tracepoints,
5919 linux_read_pc,
5920 linux_write_pc,
5921 linux_thread_stopped,
5922 NULL,
5923 linux_pause_all,
5924 linux_unpause_all,
5925 linux_cancel_breakpoints,
5926 linux_stabilize_threads,
5927 linux_install_fast_tracepoint_jump_pad,
5928 linux_emit_ops,
5929 linux_supports_disable_randomization,
5930 linux_get_min_fast_tracepoint_insn_len,
5931 linux_qxfer_libraries_svr4,
5932 linux_supports_agent,
5933 #ifdef HAVE_LINUX_BTRACE
5934 linux_supports_btrace,
5935 linux_low_enable_btrace,
5936 linux_disable_btrace,
5937 linux_low_read_btrace,
5938 #else
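  /* One NULL per btrace hook above (supports_btrace, enable_btrace,
     disable_btrace, read_btrace).  An extra NULL here would make the
     initializer longer than target_ops itself.  */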
5939 NULL,
5940 NULL,
5941 NULL,
5942 NULL,
5943 #endif
5944 };
5945
5946 static void
5947 linux_init_signals (void)
5948 {
5949 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5950 to find what the cancel signal actually is. */
5951 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5952 signal (__SIGRTMIN + 1, SIG_IGN);
5953 #endif
5954 }
5955
5956 void
5957 initialize_low (void)
5958 {
5959 struct sigaction sigchld_action;
5960 memset (&sigchld_action, 0, sizeof (sigchld_action));
5961 set_target_ops (&linux_target_ops);
5962 set_breakpoint_data (the_low_target.breakpoint,
5963 the_low_target.breakpoint_len);
5964 linux_init_signals ();
5965 linux_test_for_tracefork ();
5966 linux_ptrace_init_warnings ();
5967 #ifdef HAVE_LINUX_REGSETS
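  /* Count the target's regsets (the table ends with a sentinel entry
     whose size is negative) and allocate one "disabled" flag byte per
     regset.  */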
5968 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5969 ;
5970 disabled_regsets = xmalloc (num_regsets);
5971 #endif
5972
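  /* Install the SIGCHLD handler used to notice inferior events;
     SA_RESTART keeps slow system calls from failing with EINTR when
     the signal arrives.  */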
5973 sigchld_action.sa_handler = sigchld_handler;
5974 sigemptyset (&sigchld_action.sa_mask);
5975 sigchld_action.sa_flags = SA_RESTART;
5976 sigaction (SIGCHLD, &sigchld_action, NULL);
5977 }