/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include "gdb_wait.h"
#include <stdio.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "gdb_stat.h"
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
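
/* A worked example of the fallback W_STOPCODE encoding above (a
   sketch; this is the standard Linux wait-status layout): for SIGSTOP
   (19), W_STOPCODE (SIGSTOP) == (19 << 8) | 0x7f == 0x137f, which the
   usual decoders invert: WIFSTOPPED (0x137f) is true and
   WSTOPSIG (0x137f) == 19.  */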

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
/* PTRACE_TEXT_ADDR and friends.  */
#include <asm/ptrace.h>
#define HAS_NOMMU
#endif
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "linux-btrace.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;   /* Entry type */
  union
    {
      uint32_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;   /* Entry type */
  union
    {
      uint64_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
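
/* For illustration (a sketch; values follow the standard auxv
   layout): a 64-bit inferior's /proc/PID/auxv is a packed array of
   the entries defined above, e.g.

     { a_type = 6 (AT_PAGESZ), a_un.a_val = 4096 }
     { a_type = 0 (AT_NULL),   a_un.a_val = 0 }    <- terminator

   Keeping a_un free of pointer members, as the comments above
   explain, is what allows 32-bit and 64-bit definitions to be mixed
   across gdbserver and inferior.  */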

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
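
/* Usage sketch for the two helpers above (the real call sites appear
   later in this file): when linux_wait_for_lwp reaps a stop for a PID
   it does not recognize, it stashes it with

     add_to_pid_list (&stopped_pids, ret, *wstatp);

   and handle_extended_wait later claims it while processing the
   corresponding PTRACE_EVENT_CLONE:

     pull_pid_from_list (&stopped_pids, new_pid, &status);  */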

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit ELF, and
   -1 if it does not look like ELF at all.  Store the e_machine field
   in *MACHINE (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
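
/* For reference, the fields tested above for a typical x86-64
   executable (a sketch; constants from the ELF specification):

     e_ident[EI_MAG0..EI_MAG3] == { 0x7f, 'E', 'L', 'F' }
     e_ident[EI_CLASS]         == 2   (ELFCLASS64)
     e_machine                 == 62  (EM_X86_64)

   so elf_64_header_p stores 62 in *MACHINE and returns 1.  */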

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
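
/* Example usage (a sketch; the /proc/PID/exe symlink names the main
   executable, so the caller needs no file name):

     unsigned int machine;
     int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

   A caller can then dispatch on IS_64 and MACHINE (e.g. EM_X86_64)
   to pick the right register layout for the inferior.  */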

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
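
/* Example (a sketch; the real call appears in handle_extended_wait
   below): waiting for a specific new LWP regardless of whether it
   was created with fork or clone is

     ret = my_waitpid (new_pid, &status, __WALL);

   On C libraries whose waitpid lacks __WALL, the wrapper alternates
   plain and __WCLONE waits with WNOHANG, sleeping in sigsuspend
   between rounds, until one flavor reaps the child.  */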

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_ARG3_TYPE) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads != NOT_STOPPING_THREADS)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads != NOT_STOPPING_THREADS)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
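
/* A concrete instance of the i386 case above (a sketch; the address
   is made up): GDB plants a breakpoint at 0x08048400 by writing the
   one-byte int3 opcode there, and decr_pc_after_break is 1.  When the
   inferior executes the int3, the kernel delivers SIGTRAP with $eip
   already past it, so the regcache reads back 0x08048401, and
   get_stop_pc below reports 0x08048401 - 1 == 0x08048400, the address
   GDB knows the breakpoint by.  The !lwp->stepping check skips the
   adjustment for PTRACE_SINGLESTEP traps, which stop with the PC
   already correct.  */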

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0)
      != 0)
    {
      struct buffer buffer;

      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
             lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        fprintf (stderr,
                 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
             "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
  if (debug_threads)
    fprintf (stderr,
             "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
             target_pid_to_str (ptid_of (lwp)),
             errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n", pid);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
                 lwpid_of (lwp), pid);

      do
        {
          linux_kill_one_lwp (lwp);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it would be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s hasn't stopped: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had stopped with extended "
                 "status: no pending signal\n",
                 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, but it is in nopass state\n",
                 target_pid_to_str (ptid_of (lp)),
                 gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s had signal %s, "
                 "but we don't know if we should pass it.  Default to not.\n",
                 target_pid_to_str (ptid_of (lp)),
                 gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "GPS: lwp %s has pending signal %s: delivering it.\n",
                 target_pid_to_str (ptid_of (lp)),
                 gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Sending SIGCONT to %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
              (PTRACE_ARG4_TYPE) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (lwp)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status = 0, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  if (WIFSTOPPED (*wstatp))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
         needs to happen after we have attached to the inferior and it
         is stopped for the first time, but before we access any
         inferior registers.  */
      proc = find_process_pid (pid_of (child));
      if (proc->private->new_inferior)
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          the_low_target.arch_setup ();

          current_inferior = saved_inferior;

          proc->private->new_inferior = 0;
        }
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
                             (PTRACE_ARG3_TYPE) 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
                          (PTRACE_ARG3_TYPE) 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
          &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
                &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}
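
/* Usage sketch for the deferred-signal queue above: while an LWP is
   still inside a fast tracepoint jump pad, signals that arrive for
   it are parked with enqueue_one_deferred_signal rather than
   reported; once the LWP has been moved out of the pad, the wait
   code replays them one at a time with dequeue_one_deferred_signal,
   which turns each back into a W_STOPCODE wait status (restoring its
   siginfo), before reporting or resuming.  */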
1793
1794 /* Arrange for a breakpoint to be hit again later. We don't keep the
1795 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1796 will handle the current event, eventually we will resume this LWP,
1797 and this breakpoint will trap again. */
1798
1799 static int
1800 cancel_breakpoint (struct lwp_info *lwp)
1801 {
1802 struct thread_info *saved_inferior;
1803
1804 /* There's nothing to do if we don't support breakpoints. */
1805 if (!supports_breakpoints ())
1806 return 0;
1807
1808 /* breakpoint_at reads from current inferior. */
1809 saved_inferior = current_inferior;
1810 current_inferior = get_lwp_thread (lwp);
1811
1812 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1813 {
1814 if (debug_threads)
1815 fprintf (stderr,
1816 "CB: Push back breakpoint for %s\n",
1817 target_pid_to_str (ptid_of (lwp)));
1818
1819 /* Back up the PC if necessary. */
1820 if (the_low_target.decr_pc_after_break)
1821 {
1822 struct regcache *regcache
1823 = get_thread_regcache (current_inferior, 1);
1824 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1825 }
1826
1827 current_inferior = saved_inferior;
1828 return 1;
1829 }
1830 else
1831 {
1832 if (debug_threads)
1833 fprintf (stderr,
1834 "CB: No breakpoint found at %s for [%s]\n",
1835 paddress (lwp->stop_pc),
1836 target_pid_to_str (ptid_of (lwp)));
1837 }
1838
1839 current_inferior = saved_inferior;
1840 return 0;
1841 }
1842
1843 /* When the event-loop is doing a step-over, this points at the thread
1844 being stepped. */
1845 ptid_t step_over_bkpt;
1846
1847 /* Wait for an event from child PID. If PID is -1, wait for any
1848 child. Store the stop status through the status pointer WSTAT.
1849 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1850 event was found and OPTIONS contains WNOHANG. Return the PID of
1851 the stopped child otherwise. */
1852
1853 static int
1854 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1855 {
1856 struct lwp_info *event_child, *requested_child;
1857 ptid_t wait_ptid;
1858
1859 event_child = NULL;
1860 requested_child = NULL;
1861
1862 /* Check for a lwp with a pending status. */
1863
1864 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1865 {
1866 event_child = (struct lwp_info *)
1867 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1868 if (debug_threads && event_child)
1869 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1870 }
1871 else
1872 {
1873 requested_child = find_lwp_pid (ptid);
1874
1875 if (stopping_threads == NOT_STOPPING_THREADS
1876 && requested_child->status_pending_p
1877 && requested_child->collecting_fast_tracepoint)
1878 {
1879 enqueue_one_deferred_signal (requested_child,
1880 &requested_child->status_pending);
1881 requested_child->status_pending_p = 0;
1882 requested_child->status_pending = 0;
1883 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1884 }
1885
1886 if (requested_child->suspended
1887 && requested_child->status_pending_p)
1888 fatal ("requesting an event out of a suspended child?");
1889
1890 if (requested_child->status_pending_p)
1891 event_child = requested_child;
1892 }
1893
1894 if (event_child != NULL)
1895 {
1896 if (debug_threads)
1897 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1898 lwpid_of (event_child), event_child->status_pending);
1899 *wstat = event_child->status_pending;
1900 event_child->status_pending_p = 0;
1901 event_child->status_pending = 0;
1902 current_inferior = get_lwp_thread (event_child);
1903 return lwpid_of (event_child);
1904 }
1905
1906 if (ptid_is_pid (ptid))
1907 {
1908 /* A request to wait for a specific tgid. This is not possible
1909 with waitpid, so instead, we wait for any child, and leave
1910 children we're not interested in right now with a pending
1911 status to report later. */
1912 wait_ptid = minus_one_ptid;
1913 }
1914 else
1915 wait_ptid = ptid;
1916
1917 /* We only enter this loop if no process has a pending wait status. Thus
1918 any action taken in response to a wait status inside this loop is
1919 responding as soon as we detect the status, not after any pending
1920 events. */
1921 while (1)
1922 {
1923 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1924
1925 if ((options & WNOHANG) && event_child == NULL)
1926 {
1927 if (debug_threads)
1928 fprintf (stderr, "WNOHANG set, no event found\n");
1929 return 0;
1930 }
1931
1932 if (event_child == NULL)
1933 error ("event from unknown child");
1934
1935 if (ptid_is_pid (ptid)
1936 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1937 {
1938 if (! WIFSTOPPED (*wstat))
1939 mark_lwp_dead (event_child, *wstat);
1940 else
1941 {
1942 event_child->status_pending_p = 1;
1943 event_child->status_pending = *wstat;
1944 }
1945 continue;
1946 }
1947
1948 current_inferior = get_lwp_thread (event_child);
1949
1950 /* Check for thread exit. */
1951 if (! WIFSTOPPED (*wstat))
1952 {
1953 if (debug_threads)
1954 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1955
1956 /* If the last thread is exiting, just return. */
1957 if (last_thread_of_process_p (current_inferior))
1958 {
1959 if (debug_threads)
1960 fprintf (stderr, "LWP %ld is last lwp of process\n",
1961 lwpid_of (event_child));
1962 return lwpid_of (event_child);
1963 }
1964
1965 if (!non_stop)
1966 {
1967 current_inferior = (struct thread_info *) all_threads.head;
1968 if (debug_threads)
1969 fprintf (stderr, "Current inferior is now %ld\n",
1970 lwpid_of (get_thread_lwp (current_inferior)));
1971 }
1972 else
1973 {
1974 current_inferior = NULL;
1975 if (debug_threads)
1976 fprintf (stderr, "Current inferior is now <NULL>\n");
1977 }
1978
1979 /* If we were waiting for this particular child to do something...
1980 well, it did something. */
1981 if (requested_child != NULL)
1982 {
1983 int lwpid = lwpid_of (event_child);
1984
1985 /* Cancel the step-over operation --- the thread that
1986 started it is gone. */
1987 if (finish_step_over (event_child))
1988 unstop_all_lwps (1, event_child);
1989 delete_lwp (event_child);
1990 return lwpid;
1991 }
1992
1993 delete_lwp (event_child);
1994
1995 /* Wait for a more interesting event. */
1996 continue;
1997 }
1998
1999 if (event_child->must_set_ptrace_flags)
2000 {
2001 linux_enable_event_reporting (lwpid_of (event_child));
2002 event_child->must_set_ptrace_flags = 0;
2003 }
2004
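      /* Extended ptrace events (PTRACE_EVENT_CLONE, PTRACE_EVENT_FORK,
	 etc.) arrive as a SIGTRAP stop with the event code stored by
	 the kernel in bits 16 and up of the wait status, hence the
	 "*wstat >> 16" test below.  */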
2005 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
2006 && *wstat >> 16 != 0)
2007 {
2008 handle_extended_wait (event_child, *wstat);
2009 continue;
2010 }
2011
2012 if (WIFSTOPPED (*wstat)
2013 && WSTOPSIG (*wstat) == SIGSTOP
2014 && event_child->stop_expected)
2015 {
2016 int should_stop;
2017
2018 if (debug_threads)
2019 fprintf (stderr, "Expected stop.\n");
2020 event_child->stop_expected = 0;
2021
2022 should_stop = (current_inferior->last_resume_kind == resume_stop
2023 || stopping_threads != NOT_STOPPING_THREADS);
2024
2025 if (!should_stop)
2026 {
2027 linux_resume_one_lwp (event_child,
2028 event_child->stepping, 0, NULL);
2029 continue;
2030 }
2031 }
2032
2033 return lwpid_of (event_child);
2034 }
2035
2036 /* NOTREACHED */
2037 return 0;
2038 }
2039
2040 /* Count the LWPs that have had events. */
2041
2042 static int
2043 count_events_callback (struct inferior_list_entry *entry, void *data)
2044 {
2045 struct lwp_info *lp = (struct lwp_info *) entry;
2046 struct thread_info *thread = get_lwp_thread (lp);
2047 int *count = data;
2048
2049 gdb_assert (count != NULL);
2050
2051 /* Count only resumed LWPs that have a SIGTRAP event pending that
2052 should be reported to GDB. */
2053 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2054 && thread->last_resume_kind != resume_stop
2055 && lp->status_pending_p
2056 && WIFSTOPPED (lp->status_pending)
2057 && WSTOPSIG (lp->status_pending) == SIGTRAP
2058 && !breakpoint_inserted_here (lp->stop_pc))
2059 (*count)++;
2060
2061 return 0;
2062 }
2063
2064 /* Select the LWP (if any) that is currently being single-stepped. */
2065
2066 static int
2067 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2068 {
2069 struct lwp_info *lp = (struct lwp_info *) entry;
2070 struct thread_info *thread = get_lwp_thread (lp);
2071
2072 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2073 && thread->last_resume_kind == resume_step
2074 && lp->status_pending_p)
2075 return 1;
2076 else
2077 return 0;
2078 }
2079
2080 /* Select the Nth LWP that has had a SIGTRAP event that should be
2081 reported to GDB. */
2082
2083 static int
2084 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2085 {
2086 struct lwp_info *lp = (struct lwp_info *) entry;
2087 struct thread_info *thread = get_lwp_thread (lp);
2088 int *selector = data;
2089
2090 gdb_assert (selector != NULL);
2091
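  /* The caller seeds *SELECTOR with a random index N.  Each matching
     LWP decrements it, so we return 1 (stopping the find_inferior
     walk) exactly at the (N+1)-th match; e.g., with *SELECTOR == 2,
     the first two matching LWPs are skipped and the third one is
     selected.  */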
2092 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2093 if (thread->last_resume_kind != resume_stop
2094 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2095 && lp->status_pending_p
2096 && WIFSTOPPED (lp->status_pending)
2097 && WSTOPSIG (lp->status_pending) == SIGTRAP
2098 && !breakpoint_inserted_here (lp->stop_pc))
2099 if ((*selector)-- == 0)
2100 return 1;
2101
2102 return 0;
2103 }
2104
2105 static int
2106 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2107 {
2108 struct lwp_info *lp = (struct lwp_info *) entry;
2109 struct thread_info *thread = get_lwp_thread (lp);
2110 struct lwp_info *event_lp = data;
2111
2112 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2113 if (lp == event_lp)
2114 return 0;
2115
2116 /* If an LWP other than the LWP that we're reporting an event for has
2117 hit a GDB breakpoint (as opposed to some random trap signal),
2118 then just arrange for it to hit it again later. We don't keep
2119 the SIGTRAP status and don't forward the SIGTRAP signal to the
2120 LWP. We will handle the current event; eventually we will resume
2121 all LWPs, and this one will get its breakpoint trap again.
2122
2123 If we do not do this, then we run the risk that the user will
2124 delete or disable the breakpoint, but the LWP will have already
2125 tripped on it. */
2126
2127 if (thread->last_resume_kind != resume_stop
2128 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2129 && lp->status_pending_p
2130 && WIFSTOPPED (lp->status_pending)
2131 && WSTOPSIG (lp->status_pending) == SIGTRAP
2132 && !lp->stepping
2133 && !lp->stopped_by_watchpoint
2134 && cancel_breakpoint (lp))
2135 /* Throw away the SIGTRAP. */
2136 lp->status_pending_p = 0;
2137
2138 return 0;
2139 }
2140
2141 static void
2142 linux_cancel_breakpoints (void)
2143 {
2144 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2145 }
2146
2147 /* Select one LWP out of those that have events pending. */
2148
2149 static void
2150 select_event_lwp (struct lwp_info **orig_lp)
2151 {
2152 int num_events = 0;
2153 int random_selector;
2154 struct lwp_info *event_lp;
2155
2156 /* Give preference to any LWP that is being single-stepped. */
2157 event_lp
2158 = (struct lwp_info *) find_inferior (&all_lwps,
2159 select_singlestep_lwp_callback, NULL);
2160 if (event_lp != NULL)
2161 {
2162 if (debug_threads)
2163 fprintf (stderr,
2164 "SEL: Select single-step %s\n",
2165 target_pid_to_str (ptid_of (event_lp)));
2166 }
2167 else
2168 {
2169 /* No single-stepping LWP. Select one at random, out of those
2170 which have had SIGTRAP events. */
2171
2172 /* First see how many SIGTRAP events we have. */
2173 find_inferior (&all_lwps, count_events_callback, &num_events);
2174
2175 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
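      /* rand () / (RAND_MAX + 1.0) is uniform in [0, 1), so scaling
	 by NUM_EVENTS and truncating yields an index in
	 [0, num_events - 1] with roughly equal probability for each
	 value; e.g., with three events the selector is 0, 1 or 2.  */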
2176 random_selector = (int)
2177 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2178
2179 if (debug_threads && num_events > 1)
2180 fprintf (stderr,
2181 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2182 num_events, random_selector);
2183
2184 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2185 select_event_lwp_callback,
2186 &random_selector);
2187 }
2188
2189 if (event_lp != NULL)
2190 {
2191 /* Switch the event LWP. */
2192 *orig_lp = event_lp;
2193 }
2194 }
2195
2196 /* Decrement the suspend count of an LWP. */
2197
2198 static int
2199 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2200 {
2201 struct lwp_info *lwp = (struct lwp_info *) entry;
2202
2203 /* Ignore EXCEPT. */
2204 if (lwp == except)
2205 return 0;
2206
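  /* Each decrement here balances an increment done while stopping
     threads (see suspend_and_send_sigstop_callback below); the count
     must never go negative, which the assert below checks.  */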
2207 lwp->suspended--;
2208
2209 gdb_assert (lwp->suspended >= 0);
2210 return 0;
2211 }
2212
2213 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2214 non-NULL. */
2215
2216 static void
2217 unsuspend_all_lwps (struct lwp_info *except)
2218 {
2219 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2220 }
2221
2222 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2223 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2224 void *data);
2225 static int lwp_running (struct inferior_list_entry *entry, void *data);
2226 static ptid_t linux_wait_1 (ptid_t ptid,
2227 struct target_waitstatus *ourstatus,
2228 int target_options);
2229
2230 /* Stabilize threads (move out of jump pads).
2231
2232 If a thread is midway through collecting a fast tracepoint, we
2233 need to finish the collection and move it out of the jump pad before
2234 reporting the signal.
2235
2236 This avoids recursion while collecting (when a signal arrives
2237 midway, and the signal handler itself collects), which would trash
2238 the trace buffer. In case the user set a breakpoint in a signal
2239 handler, this avoids the backtrace showing the jump pad, etc.
2240 Most importantly, there are certain things we can't do safely if
2241 threads are stopped in a jump pad (or in its callees). For
2242 example:
2243
2244 - starting a new trace run. A thread still collecting from the
2245 previous run could trash the trace buffer when resumed. The trace
2246 buffer control structures would have been reset, but the thread had
2247 no way to tell. The thread could even be midway through memcpy'ing
2248 to the buffer, which would mean that when resumed, it would clobber
2249 the trace buffer that had been set up for the new run.
2250
2251 - we can't rewrite/reuse the jump pads for new tracepoints
2252 safely. Say you do tstart while a thread is stopped midway
2253 through collecting. When the thread is later resumed, it finishes the
2254 collection, and returns to the jump pad, to execute the original
2255 instruction that was under the tracepoint jump at the time the
2256 older run had been started. If the jump pad had been rewritten
2257 since for something else in the new run, the thread would now
2258 execute the wrong / random instructions. */
2259
2260 static void
2261 linux_stabilize_threads (void)
2262 {
2263 struct thread_info *save_inferior;
2264 struct lwp_info *lwp_stuck;
2265
2266 lwp_stuck
2267 = (struct lwp_info *) find_inferior (&all_lwps,
2268 stuck_in_jump_pad_callback, NULL);
2269 if (lwp_stuck != NULL)
2270 {
2271 if (debug_threads)
2272 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2273 lwpid_of (lwp_stuck));
2274 return;
2275 }
2276
2277 save_inferior = current_inferior;
2278
2279 stabilizing_threads = 1;
2280
2281 /* Kick 'em all. */
2282 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2283
2284 /* Loop until all are stopped out of the jump pads. */
2285 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2286 {
2287 struct target_waitstatus ourstatus;
2288 struct lwp_info *lwp;
2289 int wstat;
2290
2291 /* Note that we go through the full wait event loop. While
2292 moving threads out of the jump pad, we need to be able to step
2293 over internal breakpoints and such. */
2294 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2295
2296 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2297 {
2298 lwp = get_thread_lwp (current_inferior);
2299
2300 /* Lock it. */
2301 lwp->suspended++;
2302
2303 if (ourstatus.value.sig != GDB_SIGNAL_0
2304 || current_inferior->last_resume_kind == resume_stop)
2305 {
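	      /* Re-encode the signal as a raw "stopped" wait status,
		 the same layout WIFSTOPPED/WSTOPSIG decode, so it can
		 be dequeued and reported later as if it had just come
		 out of waitpid.  */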
2306 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2307 enqueue_one_deferred_signal (lwp, &wstat);
2308 }
2309 }
2310 }
2311
2312 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2313
2314 stabilizing_threads = 0;
2315
2316 current_inferior = save_inferior;
2317
2318 if (debug_threads)
2319 {
2320 lwp_stuck
2321 = (struct lwp_info *) find_inferior (&all_lwps,
2322 stuck_in_jump_pad_callback, NULL);
2323 if (lwp_stuck != NULL)
2324 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2325 lwpid_of (lwp_stuck));
2326 }
2327 }
2328
2329 /* Wait for process, returns status. */
2330
2331 static ptid_t
2332 linux_wait_1 (ptid_t ptid,
2333 struct target_waitstatus *ourstatus, int target_options)
2334 {
2335 int w;
2336 struct lwp_info *event_child;
2337 int options;
2338 int pid;
2339 int step_over_finished;
2340 int bp_explains_trap;
2341 int maybe_internal_trap;
2342 int report_to_gdb;
2343 int trace_event;
2344 int in_step_range;
2345
2346 /* Translate generic target options into linux options. */
2347 options = __WALL;
2348 if (target_options & TARGET_WNOHANG)
2349 options |= WNOHANG;
2350
2351 retry:
2352 bp_explains_trap = 0;
2353 trace_event = 0;
2354 in_step_range = 0;
2355 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2356
2357 /* If we were only supposed to resume one thread, only wait for
2358 that thread - if it's still alive. If it died, however - which
2359 can happen if we're coming from the thread death case below -
2360 then we need to make sure we restart the other threads. We could
2361 pick a thread at random or restart all; restarting all is less
2362 arbitrary. */
2363 if (!non_stop
2364 && !ptid_equal (cont_thread, null_ptid)
2365 && !ptid_equal (cont_thread, minus_one_ptid))
2366 {
2367 struct thread_info *thread;
2368
2369 thread = (struct thread_info *) find_inferior_id (&all_threads,
2370 cont_thread);
2371
2372 /* No stepping, no signal - unless one is pending already, of course. */
2373 if (thread == NULL)
2374 {
2375 struct thread_resume resume_info;
2376 resume_info.thread = minus_one_ptid;
2377 resume_info.kind = resume_continue;
2378 resume_info.sig = 0;
2379 linux_resume (&resume_info, 1);
2380 }
2381 else
2382 ptid = cont_thread;
2383 }
2384
2385 if (ptid_equal (step_over_bkpt, null_ptid))
2386 pid = linux_wait_for_event (ptid, &w, options);
2387 else
2388 {
2389 if (debug_threads)
2390 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2391 target_pid_to_str (step_over_bkpt));
2392 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2393 }
2394
2395 if (pid == 0) /* only if TARGET_WNOHANG */
2396 return null_ptid;
2397
2398 event_child = get_thread_lwp (current_inferior);
2399
2400 /* If we are waiting for a particular child, and it exited,
2401 linux_wait_for_event will return its exit status. Similarly if
2402 the last child exited. If this is not the last child, however,
2403 do not report it as exited until there is a 'thread exited' response
2404 available in the remote protocol. Instead, just wait for another event.
2405 This should be safe, because if the thread crashed we will already
2406 have reported the termination signal to GDB; that should stop any
2407 in-progress stepping operations, etc.
2408
2409 Report the exit status of the last thread to exit. This matches
2410 LinuxThreads' behavior. */
2411
2412 if (last_thread_of_process_p (current_inferior))
2413 {
2414 if (WIFEXITED (w) || WIFSIGNALED (w))
2415 {
2416 if (WIFEXITED (w))
2417 {
2418 ourstatus->kind = TARGET_WAITKIND_EXITED;
2419 ourstatus->value.integer = WEXITSTATUS (w);
2420
2421 if (debug_threads)
2422 fprintf (stderr,
2423 "\nChild exited with retcode = %x \n",
2424 WEXITSTATUS (w));
2425 }
2426 else
2427 {
2428 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2429 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2430
2431 if (debug_threads)
2432 fprintf (stderr,
2433 "\nChild terminated with signal = %x \n",
2434 WTERMSIG (w));
2435
2436 }
2437
2438 return ptid_of (event_child);
2439 }
2440 }
2441 else
2442 {
2443 if (!WIFSTOPPED (w))
2444 goto retry;
2445 }
2446
2447 /* If this event was not handled before, and is not a SIGTRAP, we
2448 report it. SIGILL and SIGSEGV are also treated as traps in case
2449 a breakpoint is inserted at the current PC. If this target does
2450 not support internal breakpoints at all, we also report the
2451 SIGTRAP without further processing; it's of no concern to us. */
2452 maybe_internal_trap
2453 = (supports_breakpoints ()
2454 && (WSTOPSIG (w) == SIGTRAP
2455 || ((WSTOPSIG (w) == SIGILL
2456 || WSTOPSIG (w) == SIGSEGV)
2457 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2458
2459 if (maybe_internal_trap)
2460 {
2461 /* Handle anything that requires bookkeeping before deciding to
2462 report the event or continue waiting. */
2463
2464 /* First check if we can explain the SIGTRAP with an internal
2465 breakpoint, or if we should possibly report the event to GDB.
2466 Do this before anything that may remove or insert a
2467 breakpoint. */
2468 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2469
2470 /* We have a SIGTRAP, possibly a step-over dance has just
2471 finished. If so, tweak the state machine accordingly,
2472 reinsert breakpoints and delete any reinsert (software
2473 single-step) breakpoints. */
2474 step_over_finished = finish_step_over (event_child);
2475
2476 /* Now invoke the callbacks of any internal breakpoints there. */
2477 check_breakpoints (event_child->stop_pc);
2478
2479 /* Handle tracepoint data collecting. This may overflow the
2480 trace buffer, and cause a tracing stop, removing
2481 breakpoints. */
2482 trace_event = handle_tracepoints (event_child);
2483
2484 if (bp_explains_trap)
2485 {
2486 /* If we stepped or ran into an internal breakpoint, we've
2487 already handled it. So next time we resume (from this
2488 PC), we should step over it. */
2489 if (debug_threads)
2490 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2491
2492 if (breakpoint_here (event_child->stop_pc))
2493 event_child->need_step_over = 1;
2494 }
2495 }
2496 else
2497 {
2498 /* We have some other signal, possibly a step-over dance was in
2499 progress, and it should be cancelled too. */
2500 step_over_finished = finish_step_over (event_child);
2501 }
2502
2503 /* We have all the data we need. Either report the event to GDB, or
2504 resume threads and keep waiting for more. */
2505
2506 /* If we're collecting a fast tracepoint, finish the collection and
2507 move out of the jump pad before delivering a signal. See
2508 linux_stabilize_threads. */
2509
2510 if (WIFSTOPPED (w)
2511 && WSTOPSIG (w) != SIGTRAP
2512 && supports_fast_tracepoints ()
2513 && agent_loaded_p ())
2514 {
2515 if (debug_threads)
2516 fprintf (stderr,
2517 "Got signal %d for LWP %ld. Check if we need "
2518 "to defer or adjust it.\n",
2519 WSTOPSIG (w), lwpid_of (event_child));
2520
2521 /* Allow debugging the jump pad itself. */
2522 if (current_inferior->last_resume_kind != resume_step
2523 && maybe_move_out_of_jump_pad (event_child, &w))
2524 {
2525 enqueue_one_deferred_signal (event_child, &w);
2526
2527 if (debug_threads)
2528 fprintf (stderr,
2529 "Signal %d for LWP %ld deferred (in jump pad)\n",
2530 WSTOPSIG (w), lwpid_of (event_child));
2531
2532 linux_resume_one_lwp (event_child, 0, 0, NULL);
2533 goto retry;
2534 }
2535 }
2536
2537 if (event_child->collecting_fast_tracepoint)
2538 {
2539 if (debug_threads)
2540 fprintf (stderr, "\
2541 LWP %ld was trying to move out of the jump pad (%d). \
2542 Check if we're already there.\n",
2543 lwpid_of (event_child),
2544 event_child->collecting_fast_tracepoint);
2545
2546 trace_event = 1;
2547
2548 event_child->collecting_fast_tracepoint
2549 = linux_fast_tracepoint_collecting (event_child, NULL);
2550
2551 if (event_child->collecting_fast_tracepoint != 1)
2552 {
2553 /* No longer need this breakpoint. */
2554 if (event_child->exit_jump_pad_bkpt != NULL)
2555 {
2556 if (debug_threads)
2557 fprintf (stderr,
2558 "No longer need exit-jump-pad bkpt; removing it."
2559 "stopping all threads momentarily.\n");
2560
2561 /* Other running threads could hit this breakpoint.
2562 We don't handle moribund locations like GDB does,
2563 instead we always pause all threads when removing
2564 breakpoints, so that any step-over or
2565 decr_pc_after_break adjustment is always taken
2566 care of while the breakpoint is still
2567 inserted. */
2568 stop_all_lwps (1, event_child);
2569 cancel_breakpoints ();
2570
2571 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2572 event_child->exit_jump_pad_bkpt = NULL;
2573
2574 unstop_all_lwps (1, event_child);
2575
2576 gdb_assert (event_child->suspended >= 0);
2577 }
2578 }
2579
2580 if (event_child->collecting_fast_tracepoint == 0)
2581 {
2582 if (debug_threads)
2583 fprintf (stderr,
2584 "fast tracepoint finished "
2585 "collecting successfully.\n");
2586
2587 /* We may have a deferred signal to report. */
2588 if (dequeue_one_deferred_signal (event_child, &w))
2589 {
2590 if (debug_threads)
2591 fprintf (stderr, "dequeued one signal.\n");
2592 }
2593 else
2594 {
2595 if (debug_threads)
2596 fprintf (stderr, "no deferred signals.\n");
2597
2598 if (stabilizing_threads)
2599 {
2600 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2601 ourstatus->value.sig = GDB_SIGNAL_0;
2602 return ptid_of (event_child);
2603 }
2604 }
2605 }
2606 }
2607
2608 /* Check whether GDB would be interested in this event. */
2609
2610 /* If GDB is not interested in this signal, don't stop other
2611 threads, and don't report it to GDB. Just resume the inferior
2612 right away. We do this for threading-related signals as well as
2613 any that GDB specifically requested we ignore. But never ignore
2614 SIGSTOP if we sent it ourselves, and do not ignore signals when
2615 stepping - they may require special handling to skip the signal
2616 handler. */
2617 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2618 thread library? */
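  /* NPTL uses the two lowest real-time signals internally:
     __SIGRTMIN for thread cancellation and __SIGRTMIN + 1 for setxid
     synchronization.  Those are implementation details of the thread
     library, so they are passed straight back to the inferior below
     instead of being reported to GDB.  */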
2619 if (WIFSTOPPED (w)
2620 && current_inferior->last_resume_kind != resume_step
2621 && (
2622 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2623 (current_process ()->private->thread_db != NULL
2624 && (WSTOPSIG (w) == __SIGRTMIN
2625 || WSTOPSIG (w) == __SIGRTMIN + 1))
2626 ||
2627 #endif
2628 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2629 && !(WSTOPSIG (w) == SIGSTOP
2630 && current_inferior->last_resume_kind == resume_stop))))
2631 {
2632 siginfo_t info, *info_p;
2633
2634 if (debug_threads)
2635 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2636 WSTOPSIG (w), lwpid_of (event_child));
2637
2638 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2639 (PTRACE_ARG3_TYPE) 0, &info) == 0)
2640 info_p = &info;
2641 else
2642 info_p = NULL;
2643 linux_resume_one_lwp (event_child, event_child->stepping,
2644 WSTOPSIG (w), info_p);
2645 goto retry;
2646 }
2647
2648 /* Note that all addresses are always "out of the step range" when
2649 there's no range to begin with. */
2650 in_step_range = lwp_in_step_range (event_child);
2651
2652 /* If GDB wanted this thread to single step, and the thread is out
2653 of the step range, we always want to report the SIGTRAP, and let
2654 GDB handle it. Watchpoints should always be reported. So should
2655 signals we can't explain. A SIGTRAP we can't explain could be a
2656 GDB breakpoint --- we may or may not support Z0 breakpoints. If
2657 we do, we'll be able to handle GDB breakpoints on top of internal
2658 breakpoints, by handling the internal breakpoint and still
2659 reporting the event to GDB. If we don't, we're out of luck; GDB
2660 won't see the breakpoint hit. */
2661 report_to_gdb = (!maybe_internal_trap
2662 || (current_inferior->last_resume_kind == resume_step
2663 && !in_step_range)
2664 || event_child->stopped_by_watchpoint
2665 || (!step_over_finished && !in_step_range
2666 && !bp_explains_trap && !trace_event)
2667 || (gdb_breakpoint_here (event_child->stop_pc)
2668 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2669 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2670
2671 run_breakpoint_commands (event_child->stop_pc);
2672
2673 /* We found no reason GDB would want us to stop. We either hit one
2674 of our own breakpoints, or finished an internal step GDB
2675 shouldn't know about. */
2676 if (!report_to_gdb)
2677 {
2678 if (debug_threads)
2679 {
2680 if (bp_explains_trap)
2681 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2682 if (step_over_finished)
2683 fprintf (stderr, "Step-over finished.\n");
2684 if (trace_event)
2685 fprintf (stderr, "Tracepoint event.\n");
2686 if (lwp_in_step_range (event_child))
2687 fprintf (stderr, "Range stepping pc 0x%s [0x%s, 0x%s).\n",
2688 paddress (event_child->stop_pc),
2689 paddress (event_child->step_range_start),
2690 paddress (event_child->step_range_end));
2691 }
2692
2693 /* We're not reporting this breakpoint to GDB, so apply the
2694 decr_pc_after_break adjustment to the inferior's regcache
2695 ourselves. */
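      /* For example, on x86 the breakpoint instruction traps with the
	 PC already past the int3; decr_pc_after_break is 1 there, and
	 stop_pc was computed as the trap PC minus that adjustment, so
	 writing stop_pc back re-points the thread at the breakpoint
	 address.  */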
2696
2697 if (the_low_target.set_pc != NULL)
2698 {
2699 struct regcache *regcache
2700 = get_thread_regcache (get_lwp_thread (event_child), 1);
2701 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2702 }
2703
2704 /* We may have finished stepping over a breakpoint. If so,
2705 we've stopped and suspended all LWPs momentarily except the
2706 stepping one. This is where we resume them all again. We're
2707 going to keep waiting, so use proceed, which handles stepping
2708 over the next breakpoint. */
2709 if (debug_threads)
2710 fprintf (stderr, "proceeding all threads.\n");
2711
2712 if (step_over_finished)
2713 unsuspend_all_lwps (event_child);
2714
2715 proceed_all_lwps ();
2716 goto retry;
2717 }
2718
2719 if (debug_threads)
2720 {
2721 if (current_inferior->last_resume_kind == resume_step)
2722 {
2723 if (event_child->step_range_start == event_child->step_range_end)
2724 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2725 else if (!lwp_in_step_range (event_child))
2726 fprintf (stderr, "Out of step range, reporting event.\n");
2727 }
2728 if (event_child->stopped_by_watchpoint)
2729 fprintf (stderr, "Stopped by watchpoint.\n");
2730 if (gdb_breakpoint_here (event_child->stop_pc))
2731 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2732 fprintf (stderr,
2733 "Hit a non-gdbserver trap event.\n");
2734 }
2735
2736 /* Alright, we're going to report a stop. */
2737
2738 if (!non_stop && !stabilizing_threads)
2739 {
2740 /* In all-stop, stop all threads. */
2741 stop_all_lwps (0, NULL);
2742
2743 /* If we're not waiting for a specific LWP, choose an event LWP
2744 from among those that have had events. Giving equal priority
2745 to all LWPs that have had events helps prevent
2746 starvation. */
2747 if (ptid_equal (ptid, minus_one_ptid))
2748 {
2749 event_child->status_pending_p = 1;
2750 event_child->status_pending = w;
2751
2752 select_event_lwp (&event_child);
2753
2754 event_child->status_pending_p = 0;
2755 w = event_child->status_pending;
2756 }
2757
2758 /* Now that we've selected our final event LWP, cancel any
2759 breakpoints in other LWPs that have hit a GDB breakpoint.
2760 See the comment in cancel_breakpoints_callback to find out
2761 why. */
2762 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2763
2764 /* If we were doing a step-over, all other threads but the stepping one
2765 had been paused in start_step_over, with their suspend counts
2766 incremented. We don't want to do a full unstop/unpause, because we're
2767 in all-stop mode (so we want threads stopped), but we still need to
2768 unsuspend the other threads, to decrement their `suspended' count
2769 back. */
2770 if (step_over_finished)
2771 unsuspend_all_lwps (event_child);
2772
2773 /* Stabilize threads (move out of jump pads). */
2774 stabilize_threads ();
2775 }
2776 else
2777 {
2778 /* If we just finished a step-over, then all threads had been
2779 momentarily paused. In all-stop, that's fine, we want
2780 threads stopped by now anyway. In non-stop, we need to
2781 re-resume threads that GDB wanted to be running. */
2782 if (step_over_finished)
2783 unstop_all_lwps (1, event_child);
2784 }
2785
2786 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2787
2788 if (current_inferior->last_resume_kind == resume_stop
2789 && WSTOPSIG (w) == SIGSTOP)
2790 {
2791 /* A thread that has been requested to stop by GDB with vCont;t,
2792 and it stopped cleanly, so report as SIG0. The use of
2793 SIGSTOP is an implementation detail. */
2794 ourstatus->value.sig = GDB_SIGNAL_0;
2795 }
2796 else if (current_inferior->last_resume_kind == resume_stop
2797 && WSTOPSIG (w) != SIGSTOP)
2798 {
2799 /* A thread that has been requested to stop by GDB with vCont;t,
2800 but it stopped for other reasons. */
2801 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2802 }
2803 else
2804 {
2805 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2806 }
2807
2808 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2809
2810 if (debug_threads)
2811 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2812 target_pid_to_str (ptid_of (event_child)),
2813 ourstatus->kind,
2814 ourstatus->value.sig);
2815
2816 return ptid_of (event_child);
2817 }
2818
2819 /* Get rid of any pending event in the pipe. */
2820 static void
2821 async_file_flush (void)
2822 {
2823 int ret;
2824 char buf;
2825
2826 do
2827 ret = read (linux_event_pipe[0], &buf, 1);
2828 while (ret >= 0 || (ret == -1 && errno == EINTR));
2829 }
2830
2831 /* Put something in the pipe, so the event loop wakes up. */
2832 static void
2833 async_file_mark (void)
2834 {
2835 int ret;
2836
2837 async_file_flush ();
2838
2839 do
2840 ret = write (linux_event_pipe[1], "+", 1);
2841 while (ret == 0 || (ret == -1 && errno == EINTR));
2842
2843 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2844 be awakened anyway. */
2845 }
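/* Note the event pipe is presumably set non-blocking when async mode
   is enabled (elsewhere in this file); that is what lets the read
   loop in async_file_flush drain it without blocking, and what turns
   a write to a full pipe above into a harmless EAGAIN.  */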
2846
2847 static ptid_t
2848 linux_wait (ptid_t ptid,
2849 struct target_waitstatus *ourstatus, int target_options)
2850 {
2851 ptid_t event_ptid;
2852
2853 if (debug_threads)
2854 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2855
2856 /* Flush the async file first. */
2857 if (target_is_async_p ())
2858 async_file_flush ();
2859
2860 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2861
2862 /* If at least one stop was reported, there may be more. A single
2863 SIGCHLD can signal more than one child stop. */
2864 if (target_is_async_p ()
2865 && (target_options & TARGET_WNOHANG) != 0
2866 && !ptid_equal (event_ptid, null_ptid))
2867 async_file_mark ();
2868
2869 return event_ptid;
2870 }
2871
2872 /* Send a signal to an LWP. */
2873
2874 static int
2875 kill_lwp (unsigned long lwpid, int signo)
2876 {
2877 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2878 fails, then we are not using nptl threads and we should be using kill. */
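  /* A plain kill () addresses the whole thread group, and the kernel
     may deliver the signal to any thread in it; tkill () targets the
     one LWP, which is what we want when SIGSTOP-ing individual
     threads.  */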
2879
2880 #ifdef __NR_tkill
2881 {
2882 static int tkill_failed;
2883
2884 if (!tkill_failed)
2885 {
2886 int ret;
2887
2888 errno = 0;
2889 ret = syscall (__NR_tkill, lwpid, signo);
2890 if (errno != ENOSYS)
2891 return ret;
2892 tkill_failed = 1;
2893 }
2894 }
2895 #endif
2896
2897 return kill (lwpid, signo);
2898 }
2899
2900 void
2901 linux_stop_lwp (struct lwp_info *lwp)
2902 {
2903 send_sigstop (lwp);
2904 }
2905
2906 static void
2907 send_sigstop (struct lwp_info *lwp)
2908 {
2909 int pid;
2910
2911 pid = lwpid_of (lwp);
2912
2913 /* If we already have a pending stop signal for this process, don't
2914 send another. */
2915 if (lwp->stop_expected)
2916 {
2917 if (debug_threads)
2918 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2919
2920 return;
2921 }
2922
2923 if (debug_threads)
2924 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2925
2926 lwp->stop_expected = 1;
2927 kill_lwp (pid, SIGSTOP);
2928 }
2929
2930 static int
2931 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2932 {
2933 struct lwp_info *lwp = (struct lwp_info *) entry;
2934
2935 /* Ignore EXCEPT. */
2936 if (lwp == except)
2937 return 0;
2938
2939 if (lwp->stopped)
2940 return 0;
2941
2942 send_sigstop (lwp);
2943 return 0;
2944 }
2945
2946 /* Increment the suspend count of an LWP, and stop it, if not stopped
2947 yet. */
2948 static int
2949 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2950 void *except)
2951 {
2952 struct lwp_info *lwp = (struct lwp_info *) entry;
2953
2954 /* Ignore EXCEPT. */
2955 if (lwp == except)
2956 return 0;
2957
2958 lwp->suspended++;
2959
2960 return send_sigstop_callback (entry, except);
2961 }
2962
2963 static void
2964 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2965 {
2966 /* It's dead, really. */
2967 lwp->dead = 1;
2968
2969 /* Store the exit status for later. */
2970 lwp->status_pending_p = 1;
2971 lwp->status_pending = wstat;
2972
2973 /* Prevent trying to stop it. */
2974 lwp->stopped = 1;
2975
2976 /* No further stops are expected from a dead lwp. */
2977 lwp->stop_expected = 0;
2978 }
2979
2980 static void
2981 wait_for_sigstop (struct inferior_list_entry *entry)
2982 {
2983 struct lwp_info *lwp = (struct lwp_info *) entry;
2984 struct thread_info *saved_inferior;
2985 int wstat;
2986 ptid_t saved_tid;
2987 ptid_t ptid;
2988 int pid;
2989
2990 if (lwp->stopped)
2991 {
2992 if (debug_threads)
2993 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2994 lwpid_of (lwp));
2995 return;
2996 }
2997
2998 saved_inferior = current_inferior;
2999 if (saved_inferior != NULL)
3000 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
3001 else
3002 saved_tid = null_ptid; /* avoid bogus unused warning */
3003
3004 ptid = lwp->head.id;
3005
3006 if (debug_threads)
3007 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
3008
3009 pid = linux_wait_for_event (ptid, &wstat, __WALL);
3010
3011 /* If we stopped with a non-SIGSTOP signal, save it for later
3012 and record the pending SIGSTOP. If the process exited, just
3013 return. */
3014 if (WIFSTOPPED (wstat))
3015 {
3016 if (debug_threads)
3017 fprintf (stderr, "LWP %ld stopped with signal %d\n",
3018 lwpid_of (lwp), WSTOPSIG (wstat));
3019
3020 if (WSTOPSIG (wstat) != SIGSTOP)
3021 {
3022 if (debug_threads)
3023 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
3024 lwpid_of (lwp), wstat);
3025
3026 lwp->status_pending_p = 1;
3027 lwp->status_pending = wstat;
3028 }
3029 }
3030 else
3031 {
3032 if (debug_threads)
3033 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
3034
3035 lwp = find_lwp_pid (pid_to_ptid (pid));
3036 if (lwp)
3037 {
3038 /* Leave this status pending for the next time we're able to
3039 report it. In the mean time, we'll report this lwp as
3040 dead to GDB, so GDB doesn't try to read registers and
3041 memory from it. This can only happen if this was the
3042 last thread of the process; otherwise, PID is removed
3043 from the thread tables before linux_wait_for_event
3044 returns. */
3045 mark_lwp_dead (lwp, wstat);
3046 }
3047 }
3048
3049 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
3050 current_inferior = saved_inferior;
3051 else
3052 {
3053 if (debug_threads)
3054 fprintf (stderr, "Previously current thread died.\n");
3055
3056 if (non_stop)
3057 {
3058 /* We can't change the current inferior behind GDB's back,
3059 otherwise, a subsequent command may apply to the wrong
3060 process. */
3061 current_inferior = NULL;
3062 }
3063 else
3064 {
3065 /* Set a valid thread as current. */
3066 set_desired_inferior (0);
3067 }
3068 }
3069 }
3070
3071 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3072 move it out, because we need to report the stop event to GDB. For
3073 example, if the user puts a breakpoint in the jump pad, it's
3074 because she wants to debug it. */
3075
3076 static int
3077 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3078 {
3079 struct lwp_info *lwp = (struct lwp_info *) entry;
3080 struct thread_info *thread = get_lwp_thread (lwp);
3081
3082 gdb_assert (lwp->suspended == 0);
3083 gdb_assert (lwp->stopped);
3084
3085 /* Allow debugging the jump pad, gdb_collect, etc. */
3086 return (supports_fast_tracepoints ()
3087 && agent_loaded_p ()
3088 && (gdb_breakpoint_here (lwp->stop_pc)
3089 || lwp->stopped_by_watchpoint
3090 || thread->last_resume_kind == resume_step)
3091 && linux_fast_tracepoint_collecting (lwp, NULL));
3092 }
3093
3094 static void
3095 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3096 {
3097 struct lwp_info *lwp = (struct lwp_info *) entry;
3098 struct thread_info *thread = get_lwp_thread (lwp);
3099 int *wstat;
3100
3101 gdb_assert (lwp->suspended == 0);
3102 gdb_assert (lwp->stopped);
3103
3104 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3105
3106 /* Allow debugging the jump pad, gdb_collect, etc. */
3107 if (!gdb_breakpoint_here (lwp->stop_pc)
3108 && !lwp->stopped_by_watchpoint
3109 && thread->last_resume_kind != resume_step
3110 && maybe_move_out_of_jump_pad (lwp, wstat))
3111 {
3112 if (debug_threads)
3113 fprintf (stderr,
3114 "LWP %ld needs stabilizing (in jump pad)\n",
3115 lwpid_of (lwp));
3116
3117 if (wstat)
3118 {
3119 lwp->status_pending_p = 0;
3120 enqueue_one_deferred_signal (lwp, wstat);
3121
3122 if (debug_threads)
3123 fprintf (stderr,
3124 "Signal %d for LWP %ld deferred "
3125 "(in jump pad)\n",
3126 WSTOPSIG (*wstat), lwpid_of (lwp));
3127 }
3128
3129 linux_resume_one_lwp (lwp, 0, 0, NULL);
3130 }
3131 else
3132 lwp->suspended++;
3133 }
3134
3135 static int
3136 lwp_running (struct inferior_list_entry *entry, void *data)
3137 {
3138 struct lwp_info *lwp = (struct lwp_info *) entry;
3139
3140 if (lwp->dead)
3141 return 0;
3142 if (lwp->stopped)
3143 return 0;
3144 return 1;
3145 }
3146
3147 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3148 If SUSPEND, then also increase the suspend count of every LWP,
3149 except EXCEPT. */
3150
3151 static void
3152 stop_all_lwps (int suspend, struct lwp_info *except)
3153 {
3154 /* Should not be called recursively. */
3155 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3156
3157 stopping_threads = (suspend
3158 ? STOPPING_AND_SUSPENDING_THREADS
3159 : STOPPING_THREADS);
3160
3161 if (suspend)
3162 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3163 else
3164 find_inferior (&all_lwps, send_sigstop_callback, except);
3165 for_each_inferior (&all_lwps, wait_for_sigstop);
3166 stopping_threads = NOT_STOPPING_THREADS;
3167 }
3168
3169 /* Resume execution of the inferior process.
3170 If STEP is nonzero, single-step it.
3171 If SIGNAL is nonzero, give it that signal. */
3172
3173 static void
3174 linux_resume_one_lwp (struct lwp_info *lwp,
3175 int step, int signal, siginfo_t *info)
3176 {
3177 struct thread_info *saved_inferior;
3178 int fast_tp_collecting;
3179
3180 if (lwp->stopped == 0)
3181 return;
3182
3183 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3184
3185 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3186
3187 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3188 user used the "jump" command, or "set $pc = foo"). */
3189 if (lwp->stop_pc != get_pc (lwp))
3190 {
3191 /* Collecting 'while-stepping' actions doesn't make sense
3192 anymore. */
3193 release_while_stepping_state_list (get_lwp_thread (lwp));
3194 }
3195
3196 /* If we have pending signals or status, and a new signal, enqueue the
3197 signal. Also enqueue the signal if we are waiting to reinsert a
3198 breakpoint; it will be picked up again below. */
3199 if (signal != 0
3200 && (lwp->status_pending_p
3201 || lwp->pending_signals != NULL
3202 || lwp->bp_reinsert != 0
3203 || fast_tp_collecting))
3204 {
3205 struct pending_signals *p_sig;
3206 p_sig = xmalloc (sizeof (*p_sig));
3207 p_sig->prev = lwp->pending_signals;
3208 p_sig->signal = signal;
3209 if (info == NULL)
3210 memset (&p_sig->info, 0, sizeof (siginfo_t));
3211 else
3212 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3213 lwp->pending_signals = p_sig;
3214 }
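  /* Signals are pushed at the head of the PENDING_SIGNALS list, but
     the dequeue loop further down walks to the tail, so pending
     signals are delivered in the order they arrived.  */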
3215
3216 if (lwp->status_pending_p)
3217 {
3218 if (debug_threads)
3219 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3220 " has pending status\n",
3221 lwpid_of (lwp), step ? "step" : "continue", signal,
3222 lwp->stop_expected ? "expected" : "not expected");
3223 return;
3224 }
3225
3226 saved_inferior = current_inferior;
3227 current_inferior = get_lwp_thread (lwp);
3228
3229 if (debug_threads)
3230 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3231 lwpid_of (lwp), step ? "step" : "continue", signal,
3232 lwp->stop_expected ? "expected" : "not expected");
3233
3234 /* This bit needs some thinking about. If we get a signal that
3235 we must report while a single-step reinsert is still pending,
3236 we often end up resuming the thread. It might be better to
3237 (ew) allow a stack of pending events; then we could be sure that
3238 the reinsert happened right away and not lose any signals.
3239
3240 Making this stack would also shrink the window in which breakpoints are
3241 uninserted (see comment in linux_wait_for_lwp) but not enough for
3242 complete correctness, so it won't solve that problem. It may be
3243 worthwhile just to solve this one, however. */
3244 if (lwp->bp_reinsert != 0)
3245 {
3246 if (debug_threads)
3247 fprintf (stderr, " pending reinsert at 0x%s\n",
3248 paddress (lwp->bp_reinsert));
3249
3250 if (can_hardware_single_step ())
3251 {
3252 if (fast_tp_collecting == 0)
3253 {
3254 if (step == 0)
3255 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3256 if (lwp->suspended)
3257 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3258 lwp->suspended);
3259 }
3260
3261 step = 1;
3262 }
3263
3264 /* Postpone any pending signal. It was enqueued above. */
3265 signal = 0;
3266 }
3267
3268 if (fast_tp_collecting == 1)
3269 {
3270 if (debug_threads)
3271 fprintf (stderr, "\
3272 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3273 lwpid_of (lwp));
3274
3275 /* Postpone any pending signal. It was enqueued above. */
3276 signal = 0;
3277 }
3278 else if (fast_tp_collecting == 2)
3279 {
3280 if (debug_threads)
3281 fprintf (stderr, "\
3282 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3283 lwpid_of (lwp));
3284
3285 if (can_hardware_single_step ())
3286 step = 1;
3287 else
3288 fatal ("moving out of jump pad single-stepping"
3289 " not implemented on this target");
3290
3291 /* Postpone any pending signal. It was enqueued above. */
3292 signal = 0;
3293 }
3294
3295 /* If we have while-stepping actions in this thread, set it stepping.
3296 If we have a signal to deliver, it may or may not be set to
3297 SIG_IGN, we don't know. Assume so, and allow collecting
3298 while-stepping into a signal handler. A possible smart thing to
3299 do would be to set an internal breakpoint at the signal return
3300 address, continue, and carry on catching this while-stepping
3301 action only when that breakpoint is hit. A future
3302 enhancement. */
3303 if (get_lwp_thread (lwp)->while_stepping != NULL
3304 && can_hardware_single_step ())
3305 {
3306 if (debug_threads)
3307 fprintf (stderr,
3308 "lwp %ld has a while-stepping action -> forcing step.\n",
3309 lwpid_of (lwp));
3310 step = 1;
3311 }
3312
3313 if (debug_threads && the_low_target.get_pc != NULL)
3314 {
3315 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3316 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3317 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3318 }
3319
3320 /* If we have pending signals, consume one unless we are trying to
3321 reinsert a breakpoint or we're trying to finish a fast tracepoint
3322 collect. */
3323 if (lwp->pending_signals != NULL
3324 && lwp->bp_reinsert == 0
3325 && fast_tp_collecting == 0)
3326 {
3327 struct pending_signals **p_sig;
3328
3329 p_sig = &lwp->pending_signals;
3330 while ((*p_sig)->prev != NULL)
3331 p_sig = &(*p_sig)->prev;
3332
3333 signal = (*p_sig)->signal;
3334 if ((*p_sig)->info.si_signo != 0)
3335 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3336 &(*p_sig)->info);
3337
3338 free (*p_sig);
3339 *p_sig = NULL;
3340 }
3341
3342 if (the_low_target.prepare_to_resume != NULL)
3343 the_low_target.prepare_to_resume (lwp);
3344
3345 regcache_invalidate_thread (get_lwp_thread (lwp));
3346 errno = 0;
3347 lwp->stopped = 0;
3348 lwp->stopped_by_watchpoint = 0;
3349 lwp->stepping = step;
3350 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3351 (PTRACE_ARG3_TYPE) 0,
3352 /* Coerce to a uintptr_t first to avoid potential gcc warning
3353 of coercing an 8 byte integer to a 4 byte pointer. */
3354 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3355
3356 current_inferior = saved_inferior;
3357 if (errno)
3358 {
3359 /* ESRCH from ptrace either means that the thread was already
3360 running (an error) or that it is gone (a race condition). If
3361 it's gone, we will get a notification the next time we wait,
3362 so we can ignore the error. We could differentiate these
3363 two, but it's tricky without waiting; the thread still exists
3364 as a zombie, so sending it signal 0 would succeed. So just
3365 ignore ESRCH. */
3366 if (errno == ESRCH)
3367 return;
3368
3369 perror_with_name ("ptrace");
3370 }
3371 }
3372
3373 struct thread_resume_array
3374 {
3375 struct thread_resume *resume;
3376 size_t n;
3377 };
3378
3379 /* This function is called once per thread. We look up the thread
3380 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3381 resume request.
3382
3383 This algorithm is O(threads * resume elements), but resume elements
3384 is small (and will remain small at least until GDB supports thread
3385 suspension). */
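/* As a concrete example of the matching below: a resume request for
   minus_one_ptid applies to every thread; a 'pPID'-style ptid, or one
   with the LWP field set to -1, applies to every LWP of that process;
   and a fully specified ptid applies to exactly one LWP.  */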
3386 static int
3387 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3388 {
3389 struct lwp_info *lwp;
3390 struct thread_info *thread;
3391 int ndx;
3392 struct thread_resume_array *r;
3393
3394 thread = (struct thread_info *) entry;
3395 lwp = get_thread_lwp (thread);
3396 r = arg;
3397
3398 for (ndx = 0; ndx < r->n; ndx++)
3399 {
3400 ptid_t ptid = r->resume[ndx].thread;
3401 if (ptid_equal (ptid, minus_one_ptid)
3402 || ptid_equal (ptid, entry->id)
3403 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3404 of PID'. */
3405 || (ptid_get_pid (ptid) == pid_of (lwp)
3406 && (ptid_is_pid (ptid)
3407 || ptid_get_lwp (ptid) == -1)))
3408 {
3409 if (r->resume[ndx].kind == resume_stop
3410 && thread->last_resume_kind == resume_stop)
3411 {
3412 if (debug_threads)
3413 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3414 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3415 ? "stopped"
3416 : "stopping",
3417 lwpid_of (lwp));
3418
3419 continue;
3420 }
3421
3422 lwp->resume = &r->resume[ndx];
3423 thread->last_resume_kind = lwp->resume->kind;
3424
3425 lwp->step_range_start = lwp->resume->step_range_start;
3426 lwp->step_range_end = lwp->resume->step_range_end;
3427
3428 /* If we had a deferred signal to report, dequeue one now.
3429 This can happen if LWP gets more than one signal while
3430 trying to get out of a jump pad. */
3431 if (lwp->stopped
3432 && !lwp->status_pending_p
3433 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3434 {
3435 lwp->status_pending_p = 1;
3436
3437 if (debug_threads)
3438 fprintf (stderr,
3439 "Dequeueing deferred signal %d for LWP %ld, "
3440 "leaving status pending.\n",
3441 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3442 }
3443
3444 return 0;
3445 }
3446 }
3447
3448 /* No resume action for this thread. */
3449 lwp->resume = NULL;
3450
3451 return 0;
3452 }
3453
3454
3455 /* Set *FLAG_P if this lwp has an interesting status pending. */
3456 static int
3457 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3458 {
3459 struct lwp_info *lwp = (struct lwp_info *) entry;
3460
3461 /* LWPs which will not be resumed are not interesting, because
3462 we might not wait for them next time through linux_wait. */
3463 if (lwp->resume == NULL)
3464 return 0;
3465
3466 if (lwp->status_pending_p)
3467 * (int *) flag_p = 1;
3468
3469 return 0;
3470 }
3471
3472 /* Return 1 if this lwp that GDB wants running is stopped at an
3473 internal breakpoint that we need to step over. It assumes that any
3474 required STOP_PC adjustment has already been propagated to the
3475 inferior's regcache. */
3476
3477 static int
3478 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3479 {
3480 struct lwp_info *lwp = (struct lwp_info *) entry;
3481 struct thread_info *thread;
3482 struct thread_info *saved_inferior;
3483 CORE_ADDR pc;
3484
3485 /* LWPs which will not be resumed are not interesting, because we
3486 might not wait for them next time through linux_wait. */
3487
3488 if (!lwp->stopped)
3489 {
3490 if (debug_threads)
3491 fprintf (stderr,
3492 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3493 lwpid_of (lwp));
3494 return 0;
3495 }
3496
3497 thread = get_lwp_thread (lwp);
3498
3499 if (thread->last_resume_kind == resume_stop)
3500 {
3501 if (debug_threads)
3502 fprintf (stderr,
3503 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3504 lwpid_of (lwp));
3505 return 0;
3506 }
3507
3508 gdb_assert (lwp->suspended >= 0);
3509
3510 if (lwp->suspended)
3511 {
3512 if (debug_threads)
3513 fprintf (stderr,
3514 "Need step over [LWP %ld]? Ignoring, suspended\n",
3515 lwpid_of (lwp));
3516 return 0;
3517 }
3518
3519 if (!lwp->need_step_over)
3520 {
3521 if (debug_threads)
3522 fprintf (stderr,
3523 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3524 }
3525
3526 if (lwp->status_pending_p)
3527 {
3528 if (debug_threads)
3529 fprintf (stderr,
3530 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3531 lwpid_of (lwp));
3532 return 0;
3533 }
3534
3535 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3536 or we have. */
3537 pc = get_pc (lwp);
3538
3539 /* If the PC has changed since we stopped, then don't do anything,
3540 and let the breakpoint/tracepoint be hit. This happens if, for
3541 instance, GDB handled the decr_pc_after_break subtraction itself,
3542 GDB is OOL stepping this thread, or the user has issued a "jump"
3543 command, or poked the thread's registers herself. */
3544 if (pc != lwp->stop_pc)
3545 {
3546 if (debug_threads)
3547 fprintf (stderr,
3548 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3549 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3550 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3551
3552 lwp->need_step_over = 0;
3553 return 0;
3554 }
3555
3556 saved_inferior = current_inferior;
3557 current_inferior = thread;
3558
3559 /* We can only step over breakpoints we know about. */
3560 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3561 {
3562 /* Don't step over a breakpoint that GDB expects to hit
3563 though. If the condition is being evaluated on the target's side
3564 and it evaluates to false, step over this breakpoint as well. */
3565 if (gdb_breakpoint_here (pc)
3566 && gdb_condition_true_at_breakpoint (pc)
3567 && gdb_no_commands_at_breakpoint (pc))
3568 {
3569 if (debug_threads)
3570 fprintf (stderr,
3571 "Need step over [LWP %ld]? yes, but found"
3572 " GDB breakpoint at 0x%s; skipping step over\n",
3573 lwpid_of (lwp), paddress (pc));
3574
3575 current_inferior = saved_inferior;
3576 return 0;
3577 }
3578 else
3579 {
3580 if (debug_threads)
3581 fprintf (stderr,
3582 "Need step over [LWP %ld]? yes, "
3583 "found breakpoint at 0x%s\n",
3584 lwpid_of (lwp), paddress (pc));
3585
3586 /* We've found an lwp that needs stepping over --- return 1 so
3587 that find_inferior stops looking. */
3588 current_inferior = saved_inferior;
3589
3590 /* If the step over is cancelled, this is set again. */
3591 lwp->need_step_over = 0;
3592 return 1;
3593 }
3594 }
3595
3596 current_inferior = saved_inferior;
3597
3598 if (debug_threads)
3599 fprintf (stderr,
3600 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3601 lwpid_of (lwp), paddress (pc));
3602
3603 return 0;
3604 }
3605
3606 /* Start a step-over operation on LWP. When LWP stopped at a
3607 breakpoint, to make progress, we need to move the breakpoint out
3608 of the way. If we let other threads run while we do that, they may
3609 pass by the breakpoint location and miss hitting it. To avoid
3610 that, a step-over momentarily stops all threads while LWP is
3611 single-stepped while the breakpoint is temporarily uninserted from
3612 the inferior. When the single-step finishes, we reinsert the
3613 breakpoint, and let all threads that are supposed to be running,
3614 run again.
3615
3616 On targets that don't support hardware single-step, we don't
3617 currently support full software single-stepping. Instead, we only
3618 support stepping over the thread event breakpoint, by asking the
3619 low target where to place a reinsert breakpoint. Since this
3620 routine assumes the breakpoint being stepped over is a thread event
3621 breakpoint, the return address of the current function is usually
3622 a good enough place to set the reinsert breakpoint. */
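/* A rough sketch of the step-over dance, assuming hardware
   single-step (the software-single-step case differs only in setting
   a reinsert breakpoint instead of stepping):

       start_step_over (lwp):
         stop_all_lwps (1, lwp)            (suspend everyone else)
         lwp->bp_reinsert = pc             (remember what to reinsert)
         uninsert breakpoints and fast tracepoint jumps at PC
         linux_resume_one_lwp (lwp, 1, 0, NULL)    (single-step it)
         step_over_bkpt = ptid of LWP      (wait only on this LWP)

   On the next event from that LWP, finish_step_over reinserts
   everything at LWP->BP_REINSERT, clears the state, and the caller
   unsuspends the remaining threads.  */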
3623
3624 static int
3625 start_step_over (struct lwp_info *lwp)
3626 {
3627 struct thread_info *saved_inferior;
3628 CORE_ADDR pc;
3629 int step;
3630
3631 if (debug_threads)
3632 fprintf (stderr,
3633 "Starting step-over on LWP %ld. Stopping all threads\n",
3634 lwpid_of (lwp));
3635
3636 stop_all_lwps (1, lwp);
3637 gdb_assert (lwp->suspended == 0);
3638
3639 if (debug_threads)
3640 fprintf (stderr, "Done stopping all threads for step-over.\n");
3641
3642 /* Note, we should always reach here with an already adjusted PC,
3643 either by GDB (if we're resuming due to GDB's request), or by our
3644 caller, if we just finished handling an internal breakpoint GDB
3645 shouldn't care about. */
3646 pc = get_pc (lwp);
3647
3648 saved_inferior = current_inferior;
3649 current_inferior = get_lwp_thread (lwp);
3650
3651 lwp->bp_reinsert = pc;
3652 uninsert_breakpoints_at (pc);
3653 uninsert_fast_tracepoint_jumps_at (pc);
3654
3655 if (can_hardware_single_step ())
3656 {
3657 step = 1;
3658 }
3659 else
3660 {
3661 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3662 set_reinsert_breakpoint (raddr);
3663 step = 0;
3664 }
3665
3666 current_inferior = saved_inferior;
3667
3668 linux_resume_one_lwp (lwp, step, 0, NULL);
3669
3670 /* Require next event from this LWP. */
3671 step_over_bkpt = lwp->head.id;
3672 return 1;
3673 }
3674
3675 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3676 start_step_over, if still there, and delete any reinsert
3677 breakpoints we've set, on non-hardware-single-step targets. */
3678
3679 static int
3680 finish_step_over (struct lwp_info *lwp)
3681 {
3682 if (lwp->bp_reinsert != 0)
3683 {
3684 if (debug_threads)
3685 fprintf (stderr, "Finished step over.\n");
3686
3687 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3688 may be no breakpoint to reinsert there by now. */
3689 reinsert_breakpoints_at (lwp->bp_reinsert);
3690 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3691
3692 lwp->bp_reinsert = 0;
3693
3694 /* Delete any software-single-step reinsert breakpoints. No
3695 longer needed. We don't have to worry about other threads
3696 hitting this trap, and later not being able to explain it,
3697 because we were stepping over a breakpoint, and we hold all
3698 threads but LWP stopped while doing that. */
3699 if (!can_hardware_single_step ())
3700 delete_reinsert_breakpoints ();
3701
3702 step_over_bkpt = null_ptid;
3703 return 1;
3704 }
3705 else
3706 return 0;
3707 }
3708
3709 /* This function is called once per thread. We check the thread's resume
3710 request, which will tell us whether to resume, step, or leave the thread
3711 stopped; and what signal, if any, it should be sent.
3712
3713 For threads which we aren't explicitly told otherwise, we preserve
3714 the stepping flag; this is used for stepping over gdbserver-placed
3715 breakpoints.
3716
3717 If pending_flags was set in any thread, we queue any needed
3718 signals, since we won't actually resume. We already have a pending
3719 event to report, so we don't need to preserve any step requests;
3720 they should be re-issued if necessary. */
3721
3722 static int
3723 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3724 {
3725 struct lwp_info *lwp;
3726 struct thread_info *thread;
3727 int step;
3728 int leave_all_stopped = * (int *) arg;
3729 int leave_pending;
3730
3731 thread = (struct thread_info *) entry;
3732 lwp = get_thread_lwp (thread);
3733
3734 if (lwp->resume == NULL)
3735 return 0;
3736
3737 if (lwp->resume->kind == resume_stop)
3738 {
3739 if (debug_threads)
3740 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3741
3742 if (!lwp->stopped)
3743 {
3744 if (debug_threads)
3745 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3746
3747 /* Stop the thread, and wait for the event asynchronously,
3748 through the event loop. */
3749 send_sigstop (lwp);
3750 }
3751 else
3752 {
3753 if (debug_threads)
3754 fprintf (stderr, "already stopped LWP %ld\n",
3755 lwpid_of (lwp));
3756
3757 /* The LWP may have been stopped in an internal event that
3758 was not meant to be notified back to GDB (e.g., gdbserver
3759 breakpoint), so we should be reporting a stop event in
3760 this case too. */
3761
3762 /* If the thread already has a pending SIGSTOP, this is a
3763 no-op. Otherwise, something later will presumably resume
3764 the thread and this will cause it to cancel any pending
3765 operation, due to last_resume_kind == resume_stop. If
3766 the thread already has a pending status to report, we
3767 will still report it the next time we wait - see
3768 status_pending_p_callback. */
3769
3770 /* If we already have a pending signal to report, then
3771 there's no need to queue a SIGSTOP, as this means we're
3772 midway through moving the LWP out of the jumppad, and we
3773 will report the pending signal as soon as that is
3774 finished. */
3775 if (lwp->pending_signals_to_report == NULL)
3776 send_sigstop (lwp);
3777 }
3778
3779 /* For stop requests, we're done. */
3780 lwp->resume = NULL;
3781 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3782 return 0;
3783 }
3784
3785 /* If this thread, which is about to be resumed, has a pending status,
3786 then don't resume any threads - we can just report the pending
3787 status. Make sure to queue any signals that would otherwise be
3788 sent. In all-stop mode, we make this decision based on whether *any*
3789 thread has a pending status. If there's a thread that needs the
3790 step-over-breakpoint dance, then don't resume any other thread
3791 but that particular one. */
3792 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3793
3794 if (!leave_pending)
3795 {
3796 if (debug_threads)
3797 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3798
3799 step = (lwp->resume->kind == resume_step);
3800 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3801 }
3802 else
3803 {
3804 if (debug_threads)
3805 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3806
3807 /* If we have a new signal, enqueue the signal. */
3808 if (lwp->resume->sig != 0)
3809 {
3810 struct pending_signals *p_sig;
3811 p_sig = xmalloc (sizeof (*p_sig));
3812 p_sig->prev = lwp->pending_signals;
3813 p_sig->signal = lwp->resume->sig;
3814 memset (&p_sig->info, 0, sizeof (siginfo_t));
3815
3816 /* If this is the same signal we were previously stopped by,
3817 make sure to queue its siginfo. We can ignore the return
3818 value of ptrace; if it fails, we'll skip
3819 PTRACE_SETSIGINFO. */
3820 if (WIFSTOPPED (lwp->last_status)
3821 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3822 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_ARG3_TYPE) 0,
3823 &p_sig->info);
3824
3825 lwp->pending_signals = p_sig;
3826 }
3827 }
3828
3829 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3830 lwp->resume = NULL;
3831 return 0;
3832 }
3833
3834 static void
3835 linux_resume (struct thread_resume *resume_info, size_t n)
3836 {
3837 struct thread_resume_array array = { resume_info, n };
3838 struct lwp_info *need_step_over = NULL;
3839 int any_pending;
3840 int leave_all_stopped;
3841
3842 find_inferior (&all_threads, linux_set_resume_request, &array);
3843
3844 /* If there is a thread which would otherwise be resumed, which has
3845 a pending status, then don't resume any threads - we can just
3846 report the pending status. Make sure to queue any signals that
3847 would otherwise be sent. In non-stop mode, we'll apply this
3848 logic to each thread individually. We consume all pending events
3849 before considering starting a step-over (in all-stop). */
3850 any_pending = 0;
3851 if (!non_stop)
3852 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3853
3854 /* If there is a thread which would otherwise be resumed, which is
3855 stopped at a breakpoint that needs stepping over, then don't
3856 resume any threads - have it step over the breakpoint with all
3857 other threads stopped, then resume all threads again. Make sure
3858 to queue any signals that would otherwise be delivered or
3859 queued. */
3860 if (!any_pending && supports_breakpoints ())
3861 need_step_over
3862 = (struct lwp_info *) find_inferior (&all_lwps,
3863 need_step_over_p, NULL);
3864
3865 leave_all_stopped = (need_step_over != NULL || any_pending);
3866
3867 if (debug_threads)
3868 {
3869 if (need_step_over != NULL)
3870 fprintf (stderr, "Not resuming all, need step over\n");
3871 else if (any_pending)
3872 fprintf (stderr,
3873 "Not resuming, all-stop and found "
3874 "an LWP with pending status\n");
3875 else
3876 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3877 }
3878
3879 /* Even if we're leaving threads stopped, queue all signals we'd
3880 otherwise deliver. */
3881 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3882
3883 if (need_step_over)
3884 start_step_over (need_step_over);
3885 }
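/* To recap the policy above (an illustrative summary, not a separate
   code path):

     any_pending           -> leave every thread stopped and only queue
                              the requested signals;
     else need_step_over   -> resume just that LWP, stepping it past its
                              breakpoint while all others stay stopped;
     else                  -> resume, step or stop each thread as its
                              resume request asks.  */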
3886
3887 /* This function is called once per thread. We check the thread's
3888 last resume request, which will tell us whether to resume, step, or
3889 leave the thread stopped. Any signal the client requested to be
3890 delivered has already been enqueued at this point.
3891
3892 If any thread that GDB wants running is stopped at an internal
3893 breakpoint that needs stepping over, we start a step-over operation
3894 on that particular thread, and leave all others stopped. */
3895
3896 static int
3897 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3898 {
3899 struct lwp_info *lwp = (struct lwp_info *) entry;
3900 struct thread_info *thread;
3901 int step;
3902
3903 if (lwp == except)
3904 return 0;
3905
3906 if (debug_threads)
3907 fprintf (stderr,
3908 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3909
3910 if (!lwp->stopped)
3911 {
3912 if (debug_threads)
3913 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3914 return 0;
3915 }
3916
3917 thread = get_lwp_thread (lwp);
3918
3919 if (thread->last_resume_kind == resume_stop
3920 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3921 {
3922 if (debug_threads)
3923 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3924 lwpid_of (lwp));
3925 return 0;
3926 }
3927
3928 if (lwp->status_pending_p)
3929 {
3930 if (debug_threads)
3931 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3932 lwpid_of (lwp));
3933 return 0;
3934 }
3935
3936 gdb_assert (lwp->suspended >= 0);
3937
3938 if (lwp->suspended)
3939 {
3940 if (debug_threads)
3941 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3942 return 0;
3943 }
3944
3945 if (thread->last_resume_kind == resume_stop
3946 && lwp->pending_signals_to_report == NULL
3947 && lwp->collecting_fast_tracepoint == 0)
3948 {
3949 /* We haven't reported this LWP as stopped yet (otherwise, the
3950 last_status.kind check above would catch it, and we wouldn't
3951 reach here). This LWP may have been momentarily paused by a
3952 stop_all_lwps call while handling, for example, another LWP's
3953 step-over. In that case, the pending expected SIGSTOP signal
3954 that was queued at vCont;t handling time will have already
3955 been consumed by wait_for_sigstop, and so we need to requeue
3956 another one here. Note that if the LWP already has a SIGSTOP
3957 pending, this is a no-op. */
3958
3959 if (debug_threads)
3960 fprintf (stderr,
3961 "Client wants LWP %ld to stop. "
3962 "Making sure it has a SIGSTOP pending\n",
3963 lwpid_of (lwp));
3964
3965 send_sigstop (lwp);
3966 }
3967
3968 step = thread->last_resume_kind == resume_step;
3969 linux_resume_one_lwp (lwp, step, 0, NULL);
3970 return 0;
3971 }
3972
3973 static int
3974 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3975 {
3976 struct lwp_info *lwp = (struct lwp_info *) entry;
3977
3978 if (lwp == except)
3979 return 0;
3980
3981 lwp->suspended--;
3982 gdb_assert (lwp->suspended >= 0);
3983
3984 return proceed_one_lwp (entry, except);
3985 }
3986
3987 /* When we finish a step-over, set threads running again. If there's
3988 another thread that may need a step-over, now's the time to start
3989 it. Eventually, we'll move all threads past their breakpoints. */
3990
3991 static void
3992 proceed_all_lwps (void)
3993 {
3994 struct lwp_info *need_step_over;
3995
3996 /* If there is a thread which would otherwise be resumed, which is
3997 stopped at a breakpoint that needs stepping over, then don't
3998 resume any threads - have it step over the breakpoint with all
3999 other threads stopped, then resume all threads again. */
4000
4001 if (supports_breakpoints ())
4002 {
4003 need_step_over
4004 = (struct lwp_info *) find_inferior (&all_lwps,
4005 need_step_over_p, NULL);
4006
4007 if (need_step_over != NULL)
4008 {
4009 if (debug_threads)
4010 fprintf (stderr, "proceed_all_lwps: found "
4011 "thread %ld needing a step-over\n",
4012 lwpid_of (need_step_over));
4013
4014 start_step_over (need_step_over);
4015 return;
4016 }
4017 }
4018
4019 if (debug_threads)
4020 fprintf (stderr, "Proceeding, no step-over needed\n");
4021
4022 find_inferior (&all_lwps, proceed_one_lwp, NULL);
4023 }
4024
4025 /* Stopped LWPs that the client wanted to be running, that don't have
4026 pending statuses, are set to run again, except for EXCEPT, if it
4027 is not NULL. This undoes a stop_all_lwps call. */
4028
4029 static void
4030 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4031 {
4032 if (debug_threads)
4033 {
4034 if (except)
4035 fprintf (stderr,
4036 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
4037 else
4038 fprintf (stderr,
4039 "unstopping all lwps\n");
4040 }
4041
4042 if (unsuspend)
4043 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
4044 else
4045 find_inferior (&all_lwps, proceed_one_lwp, except);
4046 }
4047
4048
4049 #ifdef HAVE_LINUX_REGSETS
4050
4051 #define use_linux_regsets 1
4052
4053 /* Returns true if REGSET has been disabled. */
4054
4055 static int
4056 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4057 {
4058 return (info->disabled_regsets != NULL
4059 && info->disabled_regsets[regset - info->regsets]);
4060 }
4061
4062 /* Disable REGSET. */
4063
4064 static void
4065 disable_regset (struct regsets_info *info, struct regset_info *regset)
4066 {
4067 int dr_offset;
4068
4069 dr_offset = regset - info->regsets;
4070 if (info->disabled_regsets == NULL)
4071 info->disabled_regsets = xcalloc (1, info->num_regsets);
4072 info->disabled_regsets[dr_offset] = 1;
4073 }
4074
4075 static int
4076 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4077 struct regcache *regcache)
4078 {
4079 struct regset_info *regset;
4080 int saw_general_regs = 0;
4081 int pid;
4082 struct iovec iov;
4083
4084 regset = regsets_info->regsets;
4085
4086 pid = lwpid_of (get_thread_lwp (current_inferior));
4087 while (regset->size >= 0)
4088 {
4089 void *buf, *data;
4090 int nt_type, res;
4091
4092 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4093 {
4094 regset ++;
4095 continue;
4096 }
4097
4098 buf = xmalloc (regset->size);
4099
4100 nt_type = regset->nt_type;
4101 if (nt_type)
4102 {
4103 iov.iov_base = buf;
4104 iov.iov_len = regset->size;
4105 data = (void *) &iov;
4106 }
4107 else
4108 data = buf;
4109
4110 #ifndef __sparc__
4111 res = ptrace (regset->get_request, pid,
4112 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4113 #else
4114 res = ptrace (regset->get_request, pid, data, nt_type);
4115 #endif
4116 if (res < 0)
4117 {
4118 if (errno == EIO)
4119 {
4120 /* If we get EIO on a regset, do not try it again for
4121 this process mode. */
4122 disable_regset (regsets_info, regset);
4123 free (buf);
4124 continue;
4125 }
4126 else
4127 {
4128 char s[256];
4129 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4130 pid);
4131 perror (s);
4132 }
4133 }
4134 else if (regset->type == GENERAL_REGS)
4135 saw_general_regs = 1;
4136 if (res >= 0) regset->store_function (regcache, buf);
4137 regset ++;
4138 free (buf);
4139 }
4140 if (saw_general_regs)
4141 return 0;
4142 else
4143 return 1;
4144 }
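/* Illustrative sketch, not gdbserver code: the iovec-based regset
   transfer used above, in isolation.  A regset with a non-zero NT_TYPE
   (e.g. NT_PRSTATUS from <elf.h>) is fetched with PTRACE_GETREGSET,
   and on success the kernel sets iov_len to the number of bytes it
   actually filled in.  Assumes PTRACE_GETREGSET is available (Linux
   2.6.34 or later, or the fallback definition in linux-ptrace.h).  */

static long
example_fetch_prstatus (int pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  return ptrace (PTRACE_GETREGSET, pid, (PTRACE_ARG3_TYPE) NT_PRSTATUS,
                 &iov);
}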
4145
4146 static int
4147 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4148 struct regcache *regcache)
4149 {
4150 struct regset_info *regset;
4151 int saw_general_regs = 0;
4152 int pid;
4153 struct iovec iov;
4154
4155 regset = regsets_info->regsets;
4156
4157 pid = lwpid_of (get_thread_lwp (current_inferior));
4158 while (regset->size >= 0)
4159 {
4160 void *buf, *data;
4161 int nt_type, res;
4162
4163 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4164 {
4165 regset ++;
4166 continue;
4167 }
4168
4169 buf = xmalloc (regset->size);
4170
4171 /* First fill the buffer with the current register set contents,
4172 in case there are any items in the kernel's regset that are
4173 not in gdbserver's regcache. */
4174
4175 nt_type = regset->nt_type;
4176 if (nt_type)
4177 {
4178 iov.iov_base = buf;
4179 iov.iov_len = regset->size;
4180 data = (void *) &iov;
4181 }
4182 else
4183 data = buf;
4184
4185 #ifndef __sparc__
4186 res = ptrace (regset->get_request, pid,
4187 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4188 #else
4189 res = ptrace (regset->get_request, pid, data, nt_type);
4190 #endif
4191
4192 if (res == 0)
4193 {
4194 /* Then overlay our cached registers on that. */
4195 regset->fill_function (regcache, buf);
4196
4197 /* Only now do we write the register set. */
4198 #ifndef __sparc__
4199 res = ptrace (regset->set_request, pid,
4200 (PTRACE_ARG3_TYPE) (long) nt_type, data);
4201 #else
4202 res = ptrace (regset->set_request, pid, data, nt_type);
4203 #endif
4204 }
4205
4206 if (res < 0)
4207 {
4208 if (errno == EIO)
4209 {
4210 /* If we get EIO on a regset, do not try it again for
4211 this process mode. */
4212 disable_regset (regsets_info, regset);
4213 free (buf);
4214 continue;
4215 }
4216 else if (errno == ESRCH)
4217 {
4218 /* At this point, ESRCH should mean the process is
4219 already gone, in which case we simply ignore attempts
4220 to change its registers. See also the related
4221 comment in linux_resume_one_lwp. */
4222 free (buf);
4223 return 0;
4224 }
4225 else
4226 {
4227 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4228 }
4229 }
4230 else if (regset->type == GENERAL_REGS)
4231 saw_general_regs = 1;
4232 regset ++;
4233 free (buf);
4234 }
4235 if (saw_general_regs)
4236 return 0;
4237 else
4238 return 1;
4239 }
4240
4241 #else /* !HAVE_LINUX_REGSETS */
4242
4243 #define use_linux_regsets 0
4244 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4245 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4246
4247 #endif
4248
4249 /* Return 1 if register REGNO is supported by one of the regset ptrace
4250 calls or 0 if it has to be transferred individually. */
4251
4252 static int
4253 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4254 {
4255 unsigned char mask = 1 << (regno % 8);
4256 size_t index = regno / 8;
4257
4258 return (use_linux_regsets
4259 && (regs_info->regset_bitmap == NULL
4260 || (regs_info->regset_bitmap[index] & mask) != 0));
4261 }
4262
4263 #ifdef HAVE_LINUX_USRREGS
4264
4265 int
4266 register_addr (const struct usrregs_info *usrregs, int regnum)
4267 {
4268 int addr;
4269
4270 if (regnum < 0 || regnum >= usrregs->num_regs)
4271 error ("Invalid register number %d.", regnum);
4272
4273 addr = usrregs->regmap[regnum];
4274
4275 return addr;
4276 }
4277
4278 /* Fetch one register. */
4279 static void
4280 fetch_register (const struct usrregs_info *usrregs,
4281 struct regcache *regcache, int regno)
4282 {
4283 CORE_ADDR regaddr;
4284 int i, size;
4285 char *buf;
4286 int pid;
4287
4288 if (regno >= usrregs->num_regs)
4289 return;
4290 if ((*the_low_target.cannot_fetch_register) (regno))
4291 return;
4292
4293 regaddr = register_addr (usrregs, regno);
4294 if (regaddr == -1)
4295 return;
4296
4297 size = ((register_size (regcache->tdesc, regno)
4298 + sizeof (PTRACE_XFER_TYPE) - 1)
4299 & -sizeof (PTRACE_XFER_TYPE));
4300 buf = alloca (size);
4301
4302 pid = lwpid_of (get_thread_lwp (current_inferior));
4303 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4304 {
4305 errno = 0;
4306 *(PTRACE_XFER_TYPE *) (buf + i) =
4307 ptrace (PTRACE_PEEKUSER, pid,
4308 /* Coerce to a uintptr_t first to avoid potential gcc warning
4309 about coercing an 8 byte integer to a 4 byte pointer. */
4310 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, (PTRACE_ARG4_TYPE) 0);
4311 regaddr += sizeof (PTRACE_XFER_TYPE);
4312 if (errno != 0)
4313 error ("reading register %d: %s", regno, strerror (errno));
4314 }
4315
4316 if (the_low_target.supply_ptrace_register)
4317 the_low_target.supply_ptrace_register (regcache, regno, buf);
4318 else
4319 supply_register (regcache, regno, buf);
4320 }
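/* Worked example of the size rounding above (illustrative): with an
   8-byte PTRACE_XFER_TYPE, a 10-byte register yields
   (10 + 8 - 1) & -8 == 17 & ~7 == 16, so the peek loop above transfers
   two whole words and the regcache consumer ignores the excess
   bytes.  */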
4321
4322 /* Store one register. */
4323 static void
4324 store_register (const struct usrregs_info *usrregs,
4325 struct regcache *regcache, int regno)
4326 {
4327 CORE_ADDR regaddr;
4328 int i, size;
4329 char *buf;
4330 int pid;
4331
4332 if (regno >= usrregs->num_regs)
4333 return;
4334 if ((*the_low_target.cannot_store_register) (regno))
4335 return;
4336
4337 regaddr = register_addr (usrregs, regno);
4338 if (regaddr == -1)
4339 return;
4340
4341 size = ((register_size (regcache->tdesc, regno)
4342 + sizeof (PTRACE_XFER_TYPE) - 1)
4343 & -sizeof (PTRACE_XFER_TYPE));
4344 buf = alloca (size);
4345 memset (buf, 0, size);
4346
4347 if (the_low_target.collect_ptrace_register)
4348 the_low_target.collect_ptrace_register (regcache, regno, buf);
4349 else
4350 collect_register (regcache, regno, buf);
4351
4352 pid = lwpid_of (get_thread_lwp (current_inferior));
4353 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4354 {
4355 errno = 0;
4356 ptrace (PTRACE_POKEUSER, pid,
4357 /* Coerce to a uintptr_t first to avoid potential gcc warning
4358 about coercing an 8 byte integer to a 4 byte pointer. */
4359 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4360 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4361 if (errno != 0)
4362 {
4363 /* At this point, ESRCH should mean the process is
4364 already gone, in which case we simply ignore attempts
4365 to change its registers. See also the related
4366 comment in linux_resume_one_lwp. */
4367 if (errno == ESRCH)
4368 return;
4369
4370 if ((*the_low_target.cannot_store_register) (regno) == 0)
4371 error ("writing register %d: %s", regno, strerror (errno));
4372 }
4373 regaddr += sizeof (PTRACE_XFER_TYPE);
4374 }
4375 }
4376
4377 /* Fetch all registers, or just one, from the child process.
4378 If REGNO is -1, do this for all registers, skipping any that are
4379 assumed to have been retrieved by regsets_fetch_inferior_registers,
4380 unless ALL is non-zero.
4381 Otherwise, REGNO specifies which register (so we can save time). */
4382 static void
4383 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4384 struct regcache *regcache, int regno, int all)
4385 {
4386 struct usrregs_info *usr = regs_info->usrregs;
4387
4388 if (regno == -1)
4389 {
4390 for (regno = 0; regno < usr->num_regs; regno++)
4391 if (all || !linux_register_in_regsets (regs_info, regno))
4392 fetch_register (usr, regcache, regno);
4393 }
4394 else
4395 fetch_register (usr, regcache, regno);
4396 }
4397
4398 /* Store our register values back into the inferior.
4399 If REGNO is -1, do this for all registers, skipping any that are
4400 assumed to have been saved by regsets_store_inferior_registers,
4401 unless ALL is non-zero.
4402 Otherwise, REGNO specifies which register (so we can save time). */
4403 static void
4404 usr_store_inferior_registers (const struct regs_info *regs_info,
4405 struct regcache *regcache, int regno, int all)
4406 {
4407 struct usrregs_info *usr = regs_info->usrregs;
4408
4409 if (regno == -1)
4410 {
4411 for (regno = 0; regno < usr->num_regs; regno++)
4412 if (all || !linux_register_in_regsets (regs_info, regno))
4413 store_register (usr, regcache, regno);
4414 }
4415 else
4416 store_register (usr, regcache, regno);
4417 }
4418
4419 #else /* !HAVE_LINUX_USRREGS */
4420
4421 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4422 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4423
4424 #endif
4425
4426
4427 void
4428 linux_fetch_registers (struct regcache *regcache, int regno)
4429 {
4430 int use_regsets;
4431 int all = 0;
4432 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4433
4434 if (regno == -1)
4435 {
4436 if (the_low_target.fetch_register != NULL
4437 && regs_info->usrregs != NULL)
4438 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4439 (*the_low_target.fetch_register) (regcache, regno);
4440
4441 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4442 if (regs_info->usrregs != NULL)
4443 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4444 }
4445 else
4446 {
4447 if (the_low_target.fetch_register != NULL
4448 && (*the_low_target.fetch_register) (regcache, regno))
4449 return;
4450
4451 use_regsets = linux_register_in_regsets (regs_info, regno);
4452 if (use_regsets)
4453 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4454 regcache);
4455 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4456 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4457 }
4458 }
4459
4460 void
4461 linux_store_registers (struct regcache *regcache, int regno)
4462 {
4463 int use_regsets;
4464 int all = 0;
4465 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4466
4467 if (regno == -1)
4468 {
4469 all = regsets_store_inferior_registers (regs_info->regsets_info,
4470 regcache);
4471 if (regs_info->usrregs != NULL)
4472 usr_store_inferior_registers (regs_info, regcache, regno, all);
4473 }
4474 else
4475 {
4476 use_regsets = linux_register_in_regsets (regs_info, regno);
4477 if (use_regsets)
4478 all = regsets_store_inferior_registers (regs_info->regsets_info,
4479 regcache);
4480 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4481 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4482 }
4483 }
4484
4485
4486 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4487 to debugger memory starting at MYADDR. */
4488
4489 static int
4490 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4491 {
4492 int pid = lwpid_of (get_thread_lwp (current_inferior));
4493 register PTRACE_XFER_TYPE *buffer;
4494 register CORE_ADDR addr;
4495 register int count;
4496 char filename[64];
4497 register int i;
4498 int ret;
4499 int fd;
4500
4501 /* Try using /proc. Don't bother for one word. */
4502 if (len >= 3 * sizeof (long))
4503 {
4504 int bytes;
4505
4506 /* We could keep this file open and cache it - possibly one per
4507 thread. That requires some juggling, but is even faster. */
4508 sprintf (filename, "/proc/%d/mem", pid);
4509 fd = open (filename, O_RDONLY | O_LARGEFILE);
4510 if (fd == -1)
4511 goto no_proc;
4512
4513 /* If pread64 is available, use it. It's faster if the kernel
4514 supports it (only one syscall), and it's 64-bit safe even on
4515 32-bit platforms (for instance, SPARC debugging a SPARC64
4516 application). */
4517 #ifdef HAVE_PREAD64
4518 bytes = pread64 (fd, myaddr, len, memaddr);
4519 #else
4520 bytes = -1;
4521 if (lseek (fd, memaddr, SEEK_SET) != -1)
4522 bytes = read (fd, myaddr, len);
4523 #endif
4524
4525 close (fd);
4526 if (bytes == len)
4527 return 0;
4528
4529 /* Some data was read, we'll try to get the rest with ptrace. */
4530 if (bytes > 0)
4531 {
4532 memaddr += bytes;
4533 myaddr += bytes;
4534 len -= bytes;
4535 }
4536 }
4537
4538 no_proc:
4539 /* Round starting address down to longword boundary. */
4540 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4541 /* Round ending address up; get number of longwords that makes. */
4542 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4543 / sizeof (PTRACE_XFER_TYPE));
4544 /* Allocate buffer of that many longwords. */
4545 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4546
4547 /* Read all the longwords. */
4548 errno = 0;
4549 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4550 {
4551 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4552 about coercing an 8 byte integer to a 4 byte pointer. */
4553 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4554 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4555 (PTRACE_ARG4_TYPE) 0);
4556 if (errno)
4557 break;
4558 }
4559 ret = errno;
4560
4561 /* Copy appropriate bytes out of the buffer. */
4562 if (i > 0)
4563 {
4564 i *= sizeof (PTRACE_XFER_TYPE);
4565 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4566 memcpy (myaddr,
4567 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4568 i < len ? i : len);
4569 }
4570
4571 return ret;
4572 }
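/* Illustrative sketch, not gdbserver code: the /proc fast path above in
   a minimal standalone form - read LEN bytes at MEMADDR of process PID
   through /proc/PID/mem.  The target must be ptrace-stopped for the
   read to be permitted.  Assumes pread64 (the HAVE_PREAD64 case).  */

static ssize_t
example_proc_mem_read (int pid, unsigned long memaddr,
                       unsigned char *myaddr, size_t len)
{
  char filename[64];
  int fd;
  ssize_t bytes;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;
  /* One syscall, and 64-bit safe even on 32-bit hosts.  */
  bytes = pread64 (fd, myaddr, len, memaddr);
  close (fd);
  return bytes;
}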
4573
4574 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4575 memory at MEMADDR. On failure (cannot write to the inferior)
4576 returns the value of errno. Always succeeds if LEN is zero. */
4577
4578 static int
4579 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4580 {
4581 register int i;
4582 /* Round starting address down to longword boundary. */
4583 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4584 /* Round ending address up; get number of longwords that makes. */
4585 register int count
4586 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4587 / sizeof (PTRACE_XFER_TYPE);
4588
4589 /* Allocate buffer of that many longwords. */
4590 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4591 alloca (count * sizeof (PTRACE_XFER_TYPE));
4592
4593 int pid = lwpid_of (get_thread_lwp (current_inferior));
4594
4595 if (len == 0)
4596 {
4597 /* Zero length write always succeeds. */
4598 return 0;
4599 }
4600
4601 if (debug_threads)
4602 {
4603 /* Dump up to four bytes. */
4604 unsigned int val = * (unsigned int *) myaddr;
4605 if (len == 1)
4606 val = val & 0xff;
4607 else if (len == 2)
4608 val = val & 0xffff;
4609 else if (len == 3)
4610 val = val & 0xffffff;
4611 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4612 val, (long)memaddr);
4613 }
4614
4615 /* Fill start and end extra bytes of buffer with existing memory data. */
4616
4617 errno = 0;
4618 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4619 about coercing an 8 byte integer to a 4 byte pointer. */
4620 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4621 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4622 (PTRACE_ARG4_TYPE) 0);
4623 if (errno)
4624 return errno;
4625
4626 if (count > 1)
4627 {
4628 errno = 0;
4629 buffer[count - 1]
4630 = ptrace (PTRACE_PEEKTEXT, pid,
4631 /* Coerce to a uintptr_t first to avoid potential gcc warning
4632 about coercing an 8 byte integer to a 4 byte pointer. */
4633 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4634 * sizeof (PTRACE_XFER_TYPE)),
4635 (PTRACE_ARG4_TYPE) 0);
4636 if (errno)
4637 return errno;
4638 }
4639
4640 /* Copy data to be written over corresponding part of buffer. */
4641
4642 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4643 myaddr, len);
4644
4645 /* Write the entire buffer. */
4646
4647 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4648 {
4649 errno = 0;
4650 ptrace (PTRACE_POKETEXT, pid,
4651 /* Coerce to a uintptr_t first to avoid potential gcc warning
4652 about coercing an 8 byte integer to a 4 byte pointer. */
4653 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4654 (PTRACE_ARG4_TYPE) buffer[i]);
4655 if (errno)
4656 return errno;
4657 }
4658
4659 return 0;
4660 }
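/* Illustrative sketch, not gdbserver code: the read-modify-write scheme
   above reduced to patching one byte.  PTRACE_POKETEXT writes whole
   words only, so peek the containing word, splice the byte in, and poke
   the word back.  Returns 0 or an errno value, like the function
   above.  */

static int
example_poke_byte (int pid, unsigned long addr, unsigned char byte)
{
  unsigned long word_addr = addr & -(unsigned long) sizeof (long);
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid, (PTRACE_ARG3_TYPE) word_addr,
                 (PTRACE_ARG4_TYPE) 0);
  if (errno != 0)
    return errno;
  ((unsigned char *) &word)[addr - word_addr] = byte;
  if (ptrace (PTRACE_POKETEXT, pid, (PTRACE_ARG3_TYPE) word_addr,
              (PTRACE_ARG4_TYPE) word) != 0)
    return errno;
  return 0;
}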
4661
4662 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4663 static int linux_supports_tracefork_flag;
4664
4665 static void
4666 linux_enable_event_reporting (int pid)
4667 {
4668 if (!linux_supports_tracefork_flag)
4669 return;
4670
4671 ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_ARG3_TYPE) 0,
4672 (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4673 }
4674
4675 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4676
4677 static int
4678 linux_tracefork_grandchild (void *arg)
4679 {
4680 _exit (0);
4681 }
4682
4683 #define STACK_SIZE 4096
4684
4685 static int
4686 linux_tracefork_child (void *arg)
4687 {
4688 ptrace (PTRACE_TRACEME, 0, (PTRACE_ARG3_TYPE) 0, (PTRACE_ARG4_TYPE) 0);
4689 kill (getpid (), SIGSTOP);
4690
4691 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4692
4693 if (fork () == 0)
4694 linux_tracefork_grandchild (NULL);
4695
4696 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4697
4698 #ifdef __ia64__
4699 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4700 CLONE_VM | SIGCHLD, NULL);
4701 #else
4702 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4703 CLONE_VM | SIGCHLD, NULL);
4704 #endif
4705
4706 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4707
4708 _exit (0);
4709 }
4710
4711 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4712 sure that we can enable the option, and that it had the desired
4713 effect. */
4714
4715 static void
4716 linux_test_for_tracefork (void)
4717 {
4718 int child_pid, ret, status;
4719 long second_pid;
4720 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4721 char *stack = xmalloc (STACK_SIZE * 4);
4722 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4723
4724 linux_supports_tracefork_flag = 0;
4725
4726 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4727
4728 child_pid = fork ();
4729 if (child_pid == 0)
4730 linux_tracefork_child (NULL);
4731
4732 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4733
4734 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4735 #ifdef __ia64__
4736 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4737 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4738 #else /* !__ia64__ */
4739 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4740 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4741 #endif /* !__ia64__ */
4742
4743 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4744
4745 if (child_pid == -1)
4746 perror_with_name ("clone");
4747
4748 ret = my_waitpid (child_pid, &status, 0);
4749 if (ret == -1)
4750 perror_with_name ("waitpid");
4751 else if (ret != child_pid)
4752 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4753 if (! WIFSTOPPED (status))
4754 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4755
4756 ret = ptrace (PTRACE_SETOPTIONS, child_pid, (PTRACE_ARG3_TYPE) 0,
4757 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4758 if (ret != 0)
4759 {
4760 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4761 (PTRACE_ARG4_TYPE) 0);
4762 if (ret != 0)
4763 {
4764 warning ("linux_test_for_tracefork: failed to kill child");
4765 return;
4766 }
4767
4768 ret = my_waitpid (child_pid, &status, 0);
4769 if (ret != child_pid)
4770 warning ("linux_test_for_tracefork: failed to wait for killed child");
4771 else if (!WIFSIGNALED (status))
4772 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4773 "killed child", status);
4774
4775 return;
4776 }
4777
4778 ret = ptrace (PTRACE_CONT, child_pid, (PTRACE_ARG3_TYPE) 0,
4779 (PTRACE_ARG4_TYPE) 0);
4780 if (ret != 0)
4781 warning ("linux_test_for_tracefork: failed to resume child");
4782
4783 ret = my_waitpid (child_pid, &status, 0);
4784
4785 if (ret == child_pid && WIFSTOPPED (status)
4786 && status >> 16 == PTRACE_EVENT_FORK)
4787 {
4788 second_pid = 0;
4789 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, (PTRACE_ARG3_TYPE) 0,
4790 &second_pid);
4791 if (ret == 0 && second_pid != 0)
4792 {
4793 int second_status;
4794
4795 linux_supports_tracefork_flag = 1;
4796 my_waitpid (second_pid, &second_status, 0);
4797 ret = ptrace (PTRACE_KILL, second_pid, (PTRACE_ARG3_TYPE) 0,
4798 (PTRACE_ARG4_TYPE) 0);
4799 if (ret != 0)
4800 warning ("linux_test_for_tracefork: failed to kill second child");
4801 my_waitpid (second_pid, &status, 0);
4802 }
4803 }
4804 else
4805 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4806 "(%d, status 0x%x)", ret, status);
4807
4808 do
4809 {
4810 ret = ptrace (PTRACE_KILL, child_pid, (PTRACE_ARG3_TYPE) 0,
4811 (PTRACE_ARG4_TYPE) 0);
4812 if (ret != 0)
4813 warning ("linux_test_for_tracefork: failed to kill child");
4814 my_waitpid (child_pid, &status, 0);
4815 }
4816 while (WIFSTOPPED (status));
4817
4818 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4819 free (stack);
4820 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4821 }
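/* Illustrative sketch, not gdbserver code: what a clone event enabled
   by PTRACE_O_TRACECLONE looks like to a waiter.  The tracee stops with
   SIGTRAP, the event code sits in the high bits of the wait status, and
   PTRACE_GETEVENTMSG yields the new thread's id.  Assumes
   PTRACE_EVENT_CLONE is defined (see linux-ptrace.h).  */

static int
example_check_clone_event (int pid, int status, unsigned long *new_lwp)
{
  if (WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGTRAP
      && status >> 16 == PTRACE_EVENT_CLONE)
    return ptrace (PTRACE_GETEVENTMSG, pid, (PTRACE_ARG3_TYPE) 0,
                   new_lwp);
  return -1;
}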
4822
4823
4824 static void
4825 linux_look_up_symbols (void)
4826 {
4827 #ifdef USE_THREAD_DB
4828 struct process_info *proc = current_process ();
4829
4830 if (proc->private->thread_db != NULL)
4831 return;
4832
4833 /* If the kernel supports tracing forks then it also supports tracing
4834 clones, and then we don't need to use the magic thread event breakpoint
4835 to learn about threads. */
4836 thread_db_init (!linux_supports_tracefork_flag);
4837 #endif
4838 }
4839
4840 static void
4841 linux_request_interrupt (void)
4842 {
4843 extern unsigned long signal_pid;
4844
4845 if (!ptid_equal (cont_thread, null_ptid)
4846 && !ptid_equal (cont_thread, minus_one_ptid))
4847 {
4848 struct lwp_info *lwp;
4849 int lwpid;
4850
4851 lwp = get_thread_lwp (current_inferior);
4852 lwpid = lwpid_of (lwp);
4853 kill_lwp (lwpid, SIGINT);
4854 }
4855 else
4856 kill_lwp (signal_pid, SIGINT);
4857 }
4858
4859 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4860 to debugger memory starting at MYADDR. */
4861
4862 static int
4863 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4864 {
4865 char filename[PATH_MAX];
4866 int fd, n;
4867 int pid = lwpid_of (get_thread_lwp (current_inferior));
4868
4869 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4870
4871 fd = open (filename, O_RDONLY);
4872 if (fd < 0)
4873 return -1;
4874
4875 if (offset != (CORE_ADDR) 0
4876 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4877 n = -1;
4878 else
4879 n = read (fd, myaddr, len);
4880
4881 close (fd);
4882
4883 return n;
4884 }
4885
4886 /* These breakpoint and watchpoint related wrapper functions simply
4887 pass on the function call if the target has registered a
4888 corresponding function. */
4889
4890 static int
4891 linux_insert_point (char type, CORE_ADDR addr, int len)
4892 {
4893 if (the_low_target.insert_point != NULL)
4894 return the_low_target.insert_point (type, addr, len);
4895 else
4896 /* Unsupported (see target.h). */
4897 return 1;
4898 }
4899
4900 static int
4901 linux_remove_point (char type, CORE_ADDR addr, int len)
4902 {
4903 if (the_low_target.remove_point != NULL)
4904 return the_low_target.remove_point (type, addr, len);
4905 else
4906 /* Unsupported (see target.h). */
4907 return 1;
4908 }
4909
4910 static int
4911 linux_stopped_by_watchpoint (void)
4912 {
4913 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4914
4915 return lwp->stopped_by_watchpoint;
4916 }
4917
4918 static CORE_ADDR
4919 linux_stopped_data_address (void)
4920 {
4921 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4922
4923 return lwp->stopped_data_address;
4924 }
4925
4926 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4927 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4928 && defined(PT_TEXT_END_ADDR)
4929
4930 /* This is only used for targets that define PT_TEXT_ADDR,
4931 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, presumably
4932 the target has other ways of acquiring this information, such as
4933 loadmaps. */
4934
4935 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4936 to tell gdb about. */
4937
4938 static int
4939 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4940 {
4941 unsigned long text, text_end, data;
4942 int pid = lwpid_of (get_thread_lwp (current_inferior));
4943
4944 errno = 0;
4945
4946 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_ADDR,
4947 (PTRACE_ARG4_TYPE) 0);
4948 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_TEXT_END_ADDR,
4949 (PTRACE_ARG4_TYPE) 0);
4950 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_ARG3_TYPE) PT_DATA_ADDR,
4951 (PTRACE_ARG4_TYPE) 0);
4952
4953 if (errno == 0)
4954 {
4955 /* Both text and data offsets produced at compile-time (and so
4956 used by gdb) are relative to the beginning of the program,
4957 with the data segment immediately following the text segment.
4958 However, the actual runtime layout in memory may put the data
4959 somewhere else, so when we send gdb a data base-address, we
4960 use the real data base address and subtract the compile-time
4961 data base-address from it (which is just the length of the
4962 text segment). BSS immediately follows data in both
4963 cases. */
4964 *text_p = text;
4965 *data_p = data - (text_end - text);
4966
4967 return 1;
4968 }
4969 return 0;
4970 }
4971 #endif
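/* Worked example of the computation above (illustrative): with text at
   0x10000, text_end at 0x18000 and data loaded at 0x20000, we report
   *data_p = 0x20000 - (0x18000 - 0x10000) = 0x18000.  GDB then adds
   back the compile-time data offset - the 0x8000 text length, since at
   link time data immediately follows text - and recovers the true
   runtime data base of 0x20000.  */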
4972
4973 static int
4974 linux_qxfer_osdata (const char *annex,
4975 unsigned char *readbuf, unsigned const char *writebuf,
4976 CORE_ADDR offset, int len)
4977 {
4978 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4979 }
4980
4981 /* Convert a native/host siginfo object, into/from the siginfo in the
4982 layout of the inferiors' architecture. */
4983
4984 static void
4985 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4986 {
4987 int done = 0;
4988
4989 if (the_low_target.siginfo_fixup != NULL)
4990 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4991
4992 /* If there was no callback, or the callback didn't do anything,
4993 then just do a straight memcpy. */
4994 if (!done)
4995 {
4996 if (direction == 1)
4997 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4998 else
4999 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5000 }
5001 }
5002
5003 static int
5004 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5005 unsigned const char *writebuf, CORE_ADDR offset, int len)
5006 {
5007 int pid;
5008 siginfo_t siginfo;
5009 char inf_siginfo[sizeof (siginfo_t)];
5010
5011 if (current_inferior == NULL)
5012 return -1;
5013
5014 pid = lwpid_of (get_thread_lwp (current_inferior));
5015
5016 if (debug_threads)
5017 fprintf (stderr, "%s siginfo for lwp %d.\n",
5018 readbuf != NULL ? "Reading" : "Writing",
5019 pid);
5020
5021 if (offset >= sizeof (siginfo))
5022 return -1;
5023
5024 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
5025 return -1;
5026
5027 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5028 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5029 inferior with a 64-bit GDBSERVER should look the same as debugging it
5030 with a 32-bit GDBSERVER, we need to convert it. */
5031 siginfo_fixup (&siginfo, inf_siginfo, 0);
5032
5033 if (offset + len > sizeof (siginfo))
5034 len = sizeof (siginfo) - offset;
5035
5036 if (readbuf != NULL)
5037 memcpy (readbuf, inf_siginfo + offset, len);
5038 else
5039 {
5040 memcpy (inf_siginfo + offset, writebuf, len);
5041
5042 /* Convert back to ptrace layout before flushing it out. */
5043 siginfo_fixup (&siginfo, inf_siginfo, 1);
5044
5045 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &siginfo) != 0)
5046 return -1;
5047 }
5048
5049 return len;
5050 }
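/* Illustrative sketch, not gdbserver code: the simple native-layout
   siginfo round trip that linux_xfer_siginfo performs before any
   32-vs-64-bit fixup is involved.  */

static int
example_siginfo_round_trip (int pid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &si) != 0)
    return errno;
  /* A debugger could inspect or adjust SI here (e.g. si.si_addr).  */
  if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_ARG3_TYPE) 0, &si) != 0)
    return errno;
  return 0;
}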
5051
5052 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5053 it lets us notice when children change state; and it acts as the
5054 handler for the sigsuspend in my_waitpid. */
5055
5056 static void
5057 sigchld_handler (int signo)
5058 {
5059 int old_errno = errno;
5060
5061 if (debug_threads)
5062 {
5063 do
5064 {
5065 /* fprintf is not async-signal-safe, so call write
5066 directly. */
5067 if (write (2, "sigchld_handler\n",
5068 sizeof ("sigchld_handler\n") - 1) < 0)
5069 break; /* just ignore */
5070 } while (0);
5071 }
5072
5073 if (target_is_async_p ())
5074 async_file_mark (); /* trigger a linux_wait */
5075
5076 errno = old_errno;
5077 }
5078
5079 static int
5080 linux_supports_non_stop (void)
5081 {
5082 return 1;
5083 }
5084
5085 static int
5086 linux_async (int enable)
5087 {
5088 int previous = (linux_event_pipe[0] != -1);
5089
5090 if (debug_threads)
5091 fprintf (stderr, "linux_async (%d), previous=%d\n",
5092 enable, previous);
5093
5094 if (previous != enable)
5095 {
5096 sigset_t mask;
5097 sigemptyset (&mask);
5098 sigaddset (&mask, SIGCHLD);
5099
5100 sigprocmask (SIG_BLOCK, &mask, NULL);
5101
5102 if (enable)
5103 {
5104 if (pipe (linux_event_pipe) == -1)
5105 fatal ("creating event pipe failed.");
5106
5107 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5108 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5109
5110 /* Register the event loop handler. */
5111 add_file_handler (linux_event_pipe[0],
5112 handle_target_event, NULL);
5113
5114 /* Always trigger a linux_wait. */
5115 async_file_mark ();
5116 }
5117 else
5118 {
5119 delete_file_handler (linux_event_pipe[0]);
5120
5121 close (linux_event_pipe[0]);
5122 close (linux_event_pipe[1]);
5123 linux_event_pipe[0] = -1;
5124 linux_event_pipe[1] = -1;
5125 }
5126
5127 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5128 }
5129
5130 return previous;
5131 }
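/* Illustrative sketch, not gdbserver code: the self-pipe pattern set up
   above, in isolation.  The signal handler does the only
   async-signal-safe thing - write one byte - and the event loop,
   polling the nonblocking read end, wakes up to do the real work.  */

static int example_event_pipe[2];

static void
example_sigchld_mark (int signo)
{
  /* write is async-signal-safe; there is nothing safe to do about
     errors here, so just ignore them.  */
  if (write (example_event_pipe[1], "+", 1) < 0)
    ;
}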
5132
5133 static int
5134 linux_start_non_stop (int nonstop)
5135 {
5136 /* Register or unregister from event-loop accordingly. */
5137 linux_async (nonstop);
5138 return 0;
5139 }
5140
5141 static int
5142 linux_supports_multi_process (void)
5143 {
5144 return 1;
5145 }
5146
5147 static int
5148 linux_supports_disable_randomization (void)
5149 {
5150 #ifdef HAVE_PERSONALITY
5151 return 1;
5152 #else
5153 return 0;
5154 #endif
5155 }
5156
5157 static int
5158 linux_supports_agent (void)
5159 {
5160 return 1;
5161 }
5162
5163 static int
5164 linux_supports_range_stepping (void)
5165 {
5166 if (*the_low_target.supports_range_stepping == NULL)
5167 return 0;
5168
5169 return (*the_low_target.supports_range_stepping) ();
5170 }
5171
5172 /* Enumerate spufs IDs for process PID. */
5173 static int
5174 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5175 {
5176 int pos = 0;
5177 int written = 0;
5178 char path[128];
5179 DIR *dir;
5180 struct dirent *entry;
5181
5182 sprintf (path, "/proc/%ld/fd", pid);
5183 dir = opendir (path);
5184 if (!dir)
5185 return -1;
5186
5187 rewinddir (dir);
5188 while ((entry = readdir (dir)) != NULL)
5189 {
5190 struct stat st;
5191 struct statfs stfs;
5192 int fd;
5193
5194 fd = atoi (entry->d_name);
5195 if (!fd)
5196 continue;
5197
5198 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5199 if (stat (path, &st) != 0)
5200 continue;
5201 if (!S_ISDIR (st.st_mode))
5202 continue;
5203
5204 if (statfs (path, &stfs) != 0)
5205 continue;
5206 if (stfs.f_type != SPUFS_MAGIC)
5207 continue;
5208
5209 if (pos >= offset && pos + 4 <= offset + len)
5210 {
5211 *(unsigned int *)(buf + pos - offset) = fd;
5212 written += 4;
5213 }
5214 pos += 4;
5215 }
5216
5217 closedir (dir);
5218 return written;
5219 }
5220
5221 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5222 object type, using the /proc file system. */
5223 static int
5224 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5225 unsigned const char *writebuf,
5226 CORE_ADDR offset, int len)
5227 {
5228 long pid = lwpid_of (get_thread_lwp (current_inferior));
5229 char buf[128];
5230 int fd = 0;
5231 int ret = 0;
5232
5233 if (!writebuf && !readbuf)
5234 return -1;
5235
5236 if (!*annex)
5237 {
5238 if (!readbuf)
5239 return -1;
5240 else
5241 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5242 }
5243
5244 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5245 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5246 if (fd <= 0)
5247 return -1;
5248
5249 if (offset != 0
5250 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5251 {
5252 close (fd);
5253 return 0;
5254 }
5255
5256 if (writebuf)
5257 ret = write (fd, writebuf, (size_t) len);
5258 else
5259 ret = read (fd, readbuf, (size_t) len);
5260
5261 close (fd);
5262 return ret;
5263 }
5264
5265 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5266 struct target_loadseg
5267 {
5268 /* Core address to which the segment is mapped. */
5269 Elf32_Addr addr;
5270 /* VMA recorded in the program header. */
5271 Elf32_Addr p_vaddr;
5272 /* Size of this segment in memory. */
5273 Elf32_Word p_memsz;
5274 };
5275
5276 # if defined PT_GETDSBT
5277 struct target_loadmap
5278 {
5279 /* Protocol version number, must be zero. */
5280 Elf32_Word version;
5281 /* Pointer to the DSBT table, its size, and the DSBT index. */
5282 unsigned *dsbt_table;
5283 unsigned dsbt_size, dsbt_index;
5284 /* Number of segments in this map. */
5285 Elf32_Word nsegs;
5286 /* The actual memory map. */
5287 struct target_loadseg segs[/*nsegs*/];
5288 };
5289 # define LINUX_LOADMAP PT_GETDSBT
5290 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5291 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5292 # else
5293 struct target_loadmap
5294 {
5295 /* Protocol version number, must be zero. */
5296 Elf32_Half version;
5297 /* Number of segments in this map. */
5298 Elf32_Half nsegs;
5299 /* The actual memory map. */
5300 struct target_loadseg segs[/*nsegs*/];
5301 };
5302 # define LINUX_LOADMAP PTRACE_GETFDPIC
5303 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5304 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5305 # endif
5306
5307 static int
5308 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5309 unsigned char *myaddr, unsigned int len)
5310 {
5311 int pid = lwpid_of (get_thread_lwp (current_inferior));
5312 int addr = -1;
5313 struct target_loadmap *data = NULL;
5314 unsigned int actual_length, copy_length;
5315
5316 if (strcmp (annex, "exec") == 0)
5317 addr = (int) LINUX_LOADMAP_EXEC;
5318 else if (strcmp (annex, "interp") == 0)
5319 addr = (int) LINUX_LOADMAP_INTERP;
5320 else
5321 return -1;
5322
5323 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5324 return -1;
5325
5326 if (data == NULL)
5327 return -1;
5328
5329 actual_length = sizeof (struct target_loadmap)
5330 + sizeof (struct target_loadseg) * data->nsegs;
5331
5332 if (offset < 0 || offset > actual_length)
5333 return -1;
5334
5335 copy_length = actual_length - offset < len ? actual_length - offset : len;
5336 memcpy (myaddr, (char *) data + offset, copy_length);
5337 return copy_length;
5338 }
5339 #else
5340 # define linux_read_loadmap NULL
5341 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
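/* Illustrative note, not gdbserver code: a consumer of this loadmap
   relocates a link-time address VMA by locating the segment that
   contains it and applying that segment's displacement:

     if (seg->p_vaddr <= vma && vma < seg->p_vaddr + seg->p_memsz)
       runtime_addr = seg->addr + (vma - seg->p_vaddr);

   which is the usual address mapping for FDPIC/DSBT executables, whose
   segments may move independently of one another.  */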
5342
5343 static void
5344 linux_process_qsupported (const char *query)
5345 {
5346 if (the_low_target.process_qsupported != NULL)
5347 the_low_target.process_qsupported (query);
5348 }
5349
5350 static int
5351 linux_supports_tracepoints (void)
5352 {
5353 if (*the_low_target.supports_tracepoints == NULL)
5354 return 0;
5355
5356 return (*the_low_target.supports_tracepoints) ();
5357 }
5358
5359 static CORE_ADDR
5360 linux_read_pc (struct regcache *regcache)
5361 {
5362 if (the_low_target.get_pc == NULL)
5363 return 0;
5364
5365 return (*the_low_target.get_pc) (regcache);
5366 }
5367
5368 static void
5369 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5370 {
5371 gdb_assert (the_low_target.set_pc != NULL);
5372
5373 (*the_low_target.set_pc) (regcache, pc);
5374 }
5375
5376 static int
5377 linux_thread_stopped (struct thread_info *thread)
5378 {
5379 return get_thread_lwp (thread)->stopped;
5380 }
5381
5382 /* This exposes stop-all-threads functionality to other modules. */
5383
5384 static void
5385 linux_pause_all (int freeze)
5386 {
5387 stop_all_lwps (freeze, NULL);
5388 }
5389
5390 /* This exposes unstop-all-threads functionality to other gdbserver
5391 modules. */
5392
5393 static void
5394 linux_unpause_all (int unfreeze)
5395 {
5396 unstop_all_lwps (unfreeze, NULL);
5397 }
5398
5399 static int
5400 linux_prepare_to_access_memory (void)
5401 {
5402 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5403 running LWP. */
5404 if (non_stop)
5405 linux_pause_all (1);
5406 return 0;
5407 }
5408
5409 static void
5410 linux_done_accessing_memory (void)
5411 {
5412 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5413 running LWP. */
5414 if (non_stop)
5415 linux_unpause_all (1);
5416 }
5417
5418 static int
5419 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5420 CORE_ADDR collector,
5421 CORE_ADDR lockaddr,
5422 ULONGEST orig_size,
5423 CORE_ADDR *jump_entry,
5424 CORE_ADDR *trampoline,
5425 ULONGEST *trampoline_size,
5426 unsigned char *jjump_pad_insn,
5427 ULONGEST *jjump_pad_insn_size,
5428 CORE_ADDR *adjusted_insn_addr,
5429 CORE_ADDR *adjusted_insn_addr_end,
5430 char *err)
5431 {
5432 return (*the_low_target.install_fast_tracepoint_jump_pad)
5433 (tpoint, tpaddr, collector, lockaddr, orig_size,
5434 jump_entry, trampoline, trampoline_size,
5435 jjump_pad_insn, jjump_pad_insn_size,
5436 adjusted_insn_addr, adjusted_insn_addr_end,
5437 err);
5438 }
5439
5440 static struct emit_ops *
5441 linux_emit_ops (void)
5442 {
5443 if (the_low_target.emit_ops != NULL)
5444 return (*the_low_target.emit_ops) ();
5445 else
5446 return NULL;
5447 }
5448
5449 static int
5450 linux_get_min_fast_tracepoint_insn_len (void)
5451 {
5452 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5453 }
5454
5455 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5456
5457 static int
5458 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5459 CORE_ADDR *phdr_memaddr, int *num_phdr)
5460 {
5461 char filename[PATH_MAX];
5462 int fd;
5463 const int auxv_size = is_elf64
5464 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5465 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5466
5467 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5468
5469 fd = open (filename, O_RDONLY);
5470 if (fd < 0)
5471 return 1;
5472
5473 *phdr_memaddr = 0;
5474 *num_phdr = 0;
5475 while (read (fd, buf, auxv_size) == auxv_size
5476 && (*phdr_memaddr == 0 || *num_phdr == 0))
5477 {
5478 if (is_elf64)
5479 {
5480 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5481
5482 switch (aux->a_type)
5483 {
5484 case AT_PHDR:
5485 *phdr_memaddr = aux->a_un.a_val;
5486 break;
5487 case AT_PHNUM:
5488 *num_phdr = aux->a_un.a_val;
5489 break;
5490 }
5491 }
5492 else
5493 {
5494 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5495
5496 switch (aux->a_type)
5497 {
5498 case AT_PHDR:
5499 *phdr_memaddr = aux->a_un.a_val;
5500 break;
5501 case AT_PHNUM:
5502 *num_phdr = aux->a_un.a_val;
5503 break;
5504 }
5505 }
5506 }
5507
5508 close (fd);
5509
5510 if (*phdr_memaddr == 0 || *num_phdr == 0)
5511 {
5512 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5513 "phdr_memaddr = %ld, phdr_num = %d",
5514 (long) *phdr_memaddr, *num_phdr);
5515 return 2;
5516 }
5517
5518 return 0;
5519 }
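/* For reference (illustrative): the auxv file read above is a flat
   array of native-word (a_type, a_val) pairs terminated by AT_NULL.
   On a little-endian 64-bit inferior, the 16 bytes
   03 00 00 00 00 00 00 00  40 00 40 00 00 00 00 00
   decode as a_type AT_PHDR (3) with a_val 0x400040, a typical program
   header address for a non-PIE x86-64 executable.  */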
5520
5521 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5522
5523 static CORE_ADDR
5524 get_dynamic (const int pid, const int is_elf64)
5525 {
5526 CORE_ADDR phdr_memaddr, relocation;
5527 int num_phdr, i;
5528 unsigned char *phdr_buf;
5529 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5530
5531 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5532 return 0;
5533
5534 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5535 phdr_buf = alloca (num_phdr * phdr_size);
5536
5537 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5538 return 0;
5539
5540 /* Compute relocation: it is expected to be 0 for "regular" executables,
5541 non-zero for PIE ones. */
5542 relocation = -1;
5543 for (i = 0; relocation == -1 && i < num_phdr; i++)
5544 if (is_elf64)
5545 {
5546 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5547
5548 if (p->p_type == PT_PHDR)
5549 relocation = phdr_memaddr - p->p_vaddr;
5550 }
5551 else
5552 {
5553 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5554
5555 if (p->p_type == PT_PHDR)
5556 relocation = phdr_memaddr - p->p_vaddr;
5557 }
5558
5559 if (relocation == -1)
5560 {
5561 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5562 any real-world executable, including a PIE one, always has PT_PHDR
5563 present. PT_PHDR is absent from some shared libraries and from fpc
5564 (Free Pascal 2.4) binaries, but neither of those needs or provides
5565 DT_DEBUG anyway (fpc binaries are statically linked).
5566
5567 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5568
5569 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5570
5571 return 0;
5572 }
5573
5574 for (i = 0; i < num_phdr; i++)
5575 {
5576 if (is_elf64)
5577 {
5578 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5579
5580 if (p->p_type == PT_DYNAMIC)
5581 return p->p_vaddr + relocation;
5582 }
5583 else
5584 {
5585 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5586
5587 if (p->p_type == PT_DYNAMIC)
5588 return p->p_vaddr + relocation;
5589 }
5590 }
5591
5592 return 0;
5593 }
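/* Worked example of the relocation above (illustrative): for a PIE
   binary whose PT_PHDR records p_vaddr 0x40 while the AT_PHDR auxv
   entry reports the headers at 0x555555554040, the displacement is
   0x555555554040 - 0x40 = 0x555555554000; adding it to PT_DYNAMIC's
   p_vaddr gives the runtime address of _DYNAMIC.  */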
5594
5595 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5596 can be 0 if the inferior does not yet have the library list initialized.
5597 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5598 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5599
5600 static CORE_ADDR
5601 get_r_debug (const int pid, const int is_elf64)
5602 {
5603 CORE_ADDR dynamic_memaddr;
5604 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5605 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5606 CORE_ADDR map = -1;
5607
5608 dynamic_memaddr = get_dynamic (pid, is_elf64);
5609 if (dynamic_memaddr == 0)
5610 return map;
5611
5612 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5613 {
5614 if (is_elf64)
5615 {
5616 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5617 #ifdef DT_MIPS_RLD_MAP
5618 union
5619 {
5620 Elf64_Xword map;
5621 unsigned char buf[sizeof (Elf64_Xword)];
5622 }
5623 rld_map;
5624
5625 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5626 {
5627 if (linux_read_memory (dyn->d_un.d_val,
5628 rld_map.buf, sizeof (rld_map.buf)) == 0)
5629 return rld_map.map;
5630 else
5631 break;
5632 }
5633 #endif /* DT_MIPS_RLD_MAP */
5634
5635 if (dyn->d_tag == DT_DEBUG && map == -1)
5636 map = dyn->d_un.d_val;
5637
5638 if (dyn->d_tag == DT_NULL)
5639 break;
5640 }
5641 else
5642 {
5643 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5644 #ifdef DT_MIPS_RLD_MAP
5645 union
5646 {
5647 Elf32_Word map;
5648 unsigned char buf[sizeof (Elf32_Word)];
5649 }
5650 rld_map;
5651
5652 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5653 {
5654 if (linux_read_memory (dyn->d_un.d_val,
5655 rld_map.buf, sizeof (rld_map.buf)) == 0)
5656 return rld_map.map;
5657 else
5658 break;
5659 }
5660 #endif /* DT_MIPS_RLD_MAP */
5661
5662 if (dyn->d_tag == DT_DEBUG && map == -1)
5663 map = dyn->d_un.d_val;
5664
5665 if (dyn->d_tag == DT_NULL)
5666 break;
5667 }
5668
5669 dynamic_memaddr += dyn_size;
5670 }
5671
5672 return map;
5673 }
5674
5675 /* Read one pointer from MEMADDR in the inferior. */
5676
5677 static int
5678 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5679 {
5680 int ret;
5681
5682 /* Go through a union so this works on either big or little endian
5683 hosts, when the inferior's pointer size is smaller than the size
5684 of CORE_ADDR. It is assumed the inferior's endianness is the
5685 same as the superior's. */
5686 union
5687 {
5688 CORE_ADDR core_addr;
5689 unsigned int ui;
5690 unsigned char uc;
5691 } addr;
5692
5693 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5694 if (ret == 0)
5695 {
5696 if (ptr_size == sizeof (CORE_ADDR))
5697 *ptr = addr.core_addr;
5698 else if (ptr_size == sizeof (unsigned int))
5699 *ptr = addr.ui;
5700 else
5701 gdb_assert_not_reached ("unhandled pointer size");
5702 }
5703 return ret;
5704 }
5705
5706 struct link_map_offsets
5707 {
5708 /* Offset and size of r_debug.r_version. */
5709 int r_version_offset;
5710
5711 /* Offset and size of r_debug.r_map. */
5712 int r_map_offset;
5713
5714 /* Offset to l_addr field in struct link_map. */
5715 int l_addr_offset;
5716
5717 /* Offset to l_name field in struct link_map. */
5718 int l_name_offset;
5719
5720 /* Offset to l_ld field in struct link_map. */
5721 int l_ld_offset;
5722
5723 /* Offset to l_next field in struct link_map. */
5724 int l_next_offset;
5725
5726 /* Offset to l_prev field in struct link_map. */
5727 int l_prev_offset;
5728 };
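/* Illustrative note, not gdbserver code: these offsets mirror glibc's
   <link.h> layout.  For a 32-bit inferior, for instance:

     struct r_debug { int r_version; struct link_map *r_map; ... };
     struct link_map { Elf32_Addr l_addr; char *l_name;
                       Elf32_Dyn *l_ld;
                       struct link_map *l_next, *l_prev; };

   which puts r_map at offset 4 and l_addr/l_name/l_ld/l_next/l_prev at
   0/4/8/12/16, matching lmo_32bit_offsets below.  */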

/* Construct qXfer:libraries-svr4:read reply.  */

static int
linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
                            unsigned const char *writebuf,
                            CORE_ADDR offset, int len)
{
  char *document;
  unsigned document_len;
  struct process_info_private *const priv = current_process ()->private;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  int allocated = 1024;
  char *p;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

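  /* The annex is a list of `name=value;' pairs; for example, the
     (hypothetical) request "start=7ffff7ffd9d8;prev=0;" resumes the
     walk at a particular link map entry.  Pairs with names other than
     "start" and "prev" are skipped.  */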
  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int len;

      sep = strchr (annex, '=');
      if (sep == NULL)
        break;

      len = sep - annex;
      if (len == 5 && strncmp (annex, "start", 5) == 0)
        addrp = &lm_addr;
      else if (len == 4 && strncmp (annex, "prev", 4) == 0)
        addrp = &lm_prev;
      else
        {
          annex = strchr (sep, ';');
          if (annex == NULL)
            break;
          annex++;
          continue;
        }

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
        priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  This will not change for this
         inferior, so do not retry.  Report it to GDB as E01; see
         GDB's solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
        return -1;

      if (priv->r_debug != 0)
        {
          if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
                                 (unsigned char *) &r_version,
                                 sizeof (r_version)) != 0
              || r_version != 1)
            {
              warning ("unexpected r_debug version %d", r_version);
            }
          else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
                                 &lm_addr, ptr_size) != 0)
            {
              warning ("unable to read r_map from 0x%lx",
                       (long) (priv->r_debug + lmo->r_map_offset));
            }
        }
    }

  document = xmalloc (allocated);
  strcpy (document, "<library-list-svr4 version=\"1.0\"");
  p = document + strlen (document);

  while (lm_addr
         && read_one_ptr (lm_addr + lmo->l_name_offset,
                          &l_name, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_addr_offset,
                          &l_addr, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_ld_offset,
                          &l_ld, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_prev_offset,
                          &l_prev, ptr_size) == 0
         && read_one_ptr (lm_addr + lmo->l_next_offset,
                          &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
        {
          warning ("Corrupted shared library list: 0x%lx != 0x%lx",
                   (long) lm_prev, (long) l_prev);
          break;
        }

      /* Ignore the first entry even if it has a valid name, as it
         corresponds to the main executable.  The first entry should
         not be skipped if the dynamic loader was loaded late by a
         static executable (see the solib-svr4.c parameter
         ignore_first), but in that case the main executable has no
         PT_DYNAMIC present and this function has already returned
         above due to a failed get_r_debug.  */
      if (lm_prev == 0)
        {
          sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
          p = p + strlen (p);
        }
      else
        {
          /* Not checking for error because reading may stop before
             we've got PATH_MAX worth of characters.  */
          libname[0] = '\0';
          linux_read_memory (l_name, libname, sizeof (libname) - 1);
          libname[sizeof (libname) - 1] = '\0';
          if (libname[0] != '\0')
            {
              /* 6x the size for xml_escape_text below.  */
              size_t len = 6 * strlen ((char *) libname);
              char *name;

              if (!header_done)
                {
                  /* Terminate `<library-list-svr4'.  */
                  *p++ = '>';
                  header_done = 1;
                }

              while (allocated < p - document + len + 200)
                {
                  /* Expand to guarantee sufficient storage.  */
                  uintptr_t document_len = p - document;

                  document = xrealloc (document, 2 * allocated);
                  allocated *= 2;
                  p = document + document_len;
                }

              name = xml_escape_text ((char *) libname);
              p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
                            "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
                            name, (unsigned long) lm_addr,
                            (unsigned long) l_addr, (unsigned long) l_ld);
              free (name);
            }
        }

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      strcpy (p, "/>");
    }
  else
    strcpy (p, "</library-list-svr4>");

  document_len = strlen (document);
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document + offset, len);
  xfree (document);

  return len;
}
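
/* For illustration (hypothetical addresses, pretty-printed here for
   readability; the code above emits no whitespace between elements),
   a reply document might read:

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe700"
                l_addr="0x7ffff7812000" l_ld="0x7ffff7bb8ba0"/>
     </library-list-svr4>  */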

#ifdef HAVE_LINUX_BTRACE

/* Enable branch tracing.  */

static struct btrace_target_info *
linux_low_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  tinfo = linux_enable_btrace (ptid);

  if (tinfo != NULL)
    {
      struct thread_info *thread = find_thread_ptid (ptid);
      struct regcache *regcache = get_thread_regcache (thread, 0);

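      /* The size of register 0 serves as a proxy for the inferior's
         word size; multiplying by 8 gives the pointer width in bits.  */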
      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* Read branch trace data as a btrace XML document.  */

static void
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
                       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  int i;

  btrace = linux_read_btrace (tinfo, type);

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
                       paddress (block->begin), paddress (block->end));

  buffer_grow_str (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);
}
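
/* For illustration (hypothetical addresses): with a single trace block
   the resulting document would read:

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400500" end="0x400520"/>
     </btrace>  */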
#endif /* HAVE_LINUX_BTRACE */

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)         \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
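/* Count the regsets in INFO; the regsets array is terminated by an
   entry whose size is negative.  */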
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}