New gdbserver option --debug-format=timestamp.
deliverable/binutils-gdb.git: gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2014 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <stdio.h>
28 #include <sys/ptrace.h>
29 #include "linux-ptrace.h"
30 #include "linux-procfs.h"
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <string.h>
35 #include <stdlib.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <ctype.h>
41 #include <pwd.h>
42 #include <sys/types.h>
43 #include <dirent.h>
44 #include <sys/stat.h>
45 #include <sys/vfs.h>
46 #include <sys/uio.h>
47 #include "filestuff.h"
48 #include "tracepoint.h"
49 #include "hostio.h"
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 #ifndef W_STOPCODE
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75 #endif
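/* A minimal sketch, illustrative only, of how the W_STOPCODE
   fallback above composes a status word that the standard wait
   macros decode back; check_w_stopcode is a hypothetical helper,
   unused by the rest of this file.  */

static void
check_w_stopcode (void)
{
  int status = W_STOPCODE (SIGSTOP);	/* (SIGSTOP << 8) | 0x7f */

  if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
    abort ();			/* The round trip must hold.  */
}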
76
77 /* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79 #ifndef __SIGRTMIN
80 #define __SIGRTMIN 32
81 #endif
82
83 /* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
95 #elif defined(BFIN)
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
104 #endif
105 #endif
106
107 #ifdef HAVE_LINUX_BTRACE
108 # include "linux-btrace.h"
109 #endif
110
111 #ifndef HAVE_ELF32_AUXV_T
112 /* Copied from glibc's elf.h. */
113 typedef struct
114 {
115 uint32_t a_type; /* Entry type */
116 union
117 {
118 uint32_t a_val; /* Integer value */
119 /* We used to have pointer elements added here. We cannot do that,
120 though, since it does not work when using 32-bit definitions
121 on 64-bit platforms and vice versa. */
122 } a_un;
123 } Elf32_auxv_t;
124 #endif
125
126 #ifndef HAVE_ELF64_AUXV_T
127 /* Copied from glibc's elf.h. */
128 typedef struct
129 {
130 uint64_t a_type; /* Entry type */
131 union
132 {
133 uint64_t a_val; /* Integer value */
134 /* We used to have pointer elements added here. We cannot do that,
135 though, since it does not work when using 32-bit definitions
136 on 64-bit platforms and vice versa. */
137 } a_un;
138 } Elf64_auxv_t;
139 #endif
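/* An illustrative sketch, assuming only the auxv layouts above, of
   scanning /proc/PID/auxv for one entry; example_read_auxv_value and
   its direct use of /proc here are hypothetical, not code the rest
   of this file calls.  */

static uint64_t
example_read_auxv_value (int pid, uint64_t type)
{
  char filename[64];
  Elf64_auxv_t entry;
  uint64_t value = 0;
  int fd;

  sprintf (filename, "/proc/%d/auxv", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;

  /* Entries are fixed-size records terminated by AT_NULL (0).  */
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry)
	 && entry.a_type != 0)
    if (entry.a_type == type)
      {
	value = entry.a_un.a_val;
	break;
      }

  close (fd);
  return value;
}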
140
141 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
142 representation of the thread ID.
143
144 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
145 the same as the LWP ID.
146
147 ``all_processes'' is keyed by the "overall process ID", which
148 GNU/Linux calls tgid, "thread group ID". */
149
150 struct inferior_list all_lwps;
151
152 /* A list of all unknown processes which receive stop signals. Some
153 other process will presumably claim each of these as forked
154 children momentarily. */
155
156 struct simple_pid_list
157 {
158 /* The process ID. */
159 int pid;
160
161 /* The status as reported by waitpid. */
162 int status;
163
164 /* Next in chain. */
165 struct simple_pid_list *next;
166 };
167 struct simple_pid_list *stopped_pids;
168
169 /* Trivial list manipulation functions to keep track of a list of new
170 stopped processes. */
171
172 static void
173 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
174 {
175 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
176
177 new_pid->pid = pid;
178 new_pid->status = status;
179 new_pid->next = *listp;
180 *listp = new_pid;
181 }
182
183 static int
184 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
185 {
186 struct simple_pid_list **p;
187
188 for (p = listp; *p != NULL; p = &(*p)->next)
189 if ((*p)->pid == pid)
190 {
191 struct simple_pid_list *next = (*p)->next;
192
193 *statusp = (*p)->status;
194 xfree (*p);
195 *p = next;
196 return 1;
197 }
198 return 0;
199 }
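/* Illustrative pairing of the two helpers above, as used for the
   clone race: a stop from an LWP we do not know yet is parked on the
   list, then claimed once its parent reports the clone event.  The
   surrounding code in this sketch is hypothetical:

     int status;

     // In the waitpid loop, for a pid we cannot match yet:
     add_to_pid_list (&stopped_pids, new_pid, wstat);

     // Later, when PTRACE_EVENT_CLONE names that pid:
     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       // use STATUS instead of calling waitpid again
       ;
*/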
200
201 enum stopping_threads_kind
202 {
203 /* Not stopping threads presently. */
204 NOT_STOPPING_THREADS,
205
206 /* Stopping threads. */
207 STOPPING_THREADS,
208
209 /* Stopping and suspending threads. */
210 STOPPING_AND_SUSPENDING_THREADS
211 };
212
213 /* This is set while stop_all_lwps is in effect. */
214 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
215
216 /* FIXME make into a target method? */
217 int using_threads = 1;
218
219 /* True if we're presently stabilizing threads (moving them out of
220 jump pads). */
221 static int stabilizing_threads;
222
223 static void linux_resume_one_lwp (struct lwp_info *lwp,
224 int step, int signal, siginfo_t *info);
225 static void linux_resume (struct thread_resume *resume_info, size_t n);
226 static void stop_all_lwps (int suspend, struct lwp_info *except);
227 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
228 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
229 static void *add_lwp (ptid_t ptid);
230 static int linux_stopped_by_watchpoint (void);
231 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
232 static void proceed_all_lwps (void);
233 static int finish_step_over (struct lwp_info *lwp);
234 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
235 static int kill_lwp (unsigned long lwpid, int signo);
236
237 /* True if the low target can hardware single-step. Such targets
238 don't need a BREAKPOINT_REINSERT_ADDR callback. */
239
240 static int
241 can_hardware_single_step (void)
242 {
243 return (the_low_target.breakpoint_reinsert_addr == NULL);
244 }
245
246 /* True if the low target supports memory breakpoints. If so, we'll
247 have a GET_PC implementation. */
248
249 static int
250 supports_breakpoints (void)
251 {
252 return (the_low_target.get_pc != NULL);
253 }
254
255 /* Returns true if this target can support fast tracepoints. This
256 does not mean that the in-process agent has been loaded in the
257 inferior. */
258
259 static int
260 supports_fast_tracepoints (void)
261 {
262 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
263 }
264
265 /* True if LWP is stopped in its stepping range. */
266
267 static int
268 lwp_in_step_range (struct lwp_info *lwp)
269 {
270 CORE_ADDR pc = lwp->stop_pc;
271
272 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
273 }
274
275 struct pending_signals
276 {
277 int signal;
278 siginfo_t info;
279 struct pending_signals *prev;
280 };
281
282 /* The read/write ends of the pipe registered as waitable file in the
283 event loop. */
284 static int linux_event_pipe[2] = { -1, -1 };
285
286 /* True if we're currently in async mode. */
287 #define target_is_async_p() (linux_event_pipe[0] != -1)
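/* For illustration: a pipe like the one above is commonly created
   with pipe(2) and marked non-blocking, so the event loop can poll
   the read end while a wakeup byte is written from anywhere without
   blocking.  This is a hypothetical sketch of that setup, not
   necessarily the initialization gdbserver itself performs.  */

static int
example_open_event_pipe (void)
{
  if (pipe (linux_event_pipe) != 0)
    return -1;

  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}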
288
289 static void send_sigstop (struct lwp_info *lwp);
290 static void wait_for_sigstop (struct inferior_list_entry *entry);
291
292 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF file, and -1 if it is not ELF at all; set *MACHINE to the e_machine field (EM_NONE if not ELF). */
293
294 static int
295 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
296 {
297 if (header->e_ident[EI_MAG0] == ELFMAG0
298 && header->e_ident[EI_MAG1] == ELFMAG1
299 && header->e_ident[EI_MAG2] == ELFMAG2
300 && header->e_ident[EI_MAG3] == ELFMAG3)
301 {
302 *machine = header->e_machine;
303 return header->e_ident[EI_CLASS] == ELFCLASS64;
304
305 }
306 *machine = EM_NONE;
307 return -1;
308 }
309
310 /* Return non-zero if FILE is a 64-bit ELF file,
311 zero if the file is a 32-bit ELF file or too short to tell,
312 and -1 if the file is not accessible, doesn't exist, or is not ELF. */
313
314 static int
315 elf_64_file_p (const char *file, unsigned int *machine)
316 {
317 Elf64_Ehdr header;
318 int fd;
319
320 fd = open (file, O_RDONLY);
321 if (fd < 0)
322 return -1;
323
324 if (read (fd, &header, sizeof (header)) != sizeof (header))
325 {
326 close (fd);
327 return 0;
328 }
329 close (fd);
330
331 return elf_64_header_p (&header, machine);
332 }
333
334 /* Accepts an integer PID; returns true if the executable that PID
335 is running is a 64-bit ELF file. */
336
337 int
338 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
339 {
340 char file[PATH_MAX];
341
342 sprintf (file, "/proc/%d/exe", pid);
343 return elf_64_file_p (file, machine);
344 }
345
346 static void
347 delete_lwp (struct lwp_info *lwp)
348 {
349 remove_thread (get_lwp_thread (lwp));
350 remove_inferior (&all_lwps, &lwp->head);
351 free (lwp->arch_private);
352 free (lwp);
353 }
354
355 /* Add a process to the common process list, and set its private
356 data. */
357
358 static struct process_info *
359 linux_add_process (int pid, int attached)
360 {
361 struct process_info *proc;
362
363 proc = add_process (pid, attached);
364 proc->private = xcalloc (1, sizeof (*proc->private));
365
366 /* Set the arch when the first LWP stops. */
367 proc->private->new_inferior = 1;
368
369 if (the_low_target.new_process != NULL)
370 proc->private->arch_private = the_low_target.new_process ();
371
372 return proc;
373 }
374
375 /* Handle a GNU/Linux extended wait response. If we see a clone
376 event, we need to add the new LWP to our list (and not report the
377 trap to higher layers). */
378
379 static void
380 handle_extended_wait (struct lwp_info *event_child, int wstat)
381 {
382 int event = wstat >> 16;
383 struct lwp_info *new_lwp;
384
385 if (event == PTRACE_EVENT_CLONE)
386 {
387 ptid_t ptid;
388 unsigned long new_pid;
389 int ret, status;
390
391 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
392 &new_pid);
393
394 /* If we haven't already seen the new PID stop, wait for it now. */
395 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
396 {
397 /* The new child has a pending SIGSTOP. We can't affect it until it
398 hits the SIGSTOP, but we're already attached. */
399
400 ret = my_waitpid (new_pid, &status, __WALL);
401
402 if (ret == -1)
403 perror_with_name ("waiting for new child");
404 else if (ret != new_pid)
405 warning ("wait returned unexpected PID %d", ret);
406 else if (!WIFSTOPPED (status))
407 warning ("wait returned unexpected status 0x%x", status);
408 }
409
410 ptid = ptid_build (pid_of (event_child), new_pid, 0);
411 new_lwp = (struct lwp_info *) add_lwp (ptid);
412 add_thread (ptid, new_lwp);
413
414 /* Either we're going to immediately resume the new thread
415 or leave it stopped. linux_resume_one_lwp is a nop if it
416 thinks the thread is currently running, so set this first
417 before calling linux_resume_one_lwp. */
418 new_lwp->stopped = 1;
419
420 /* If we're suspending all threads, leave this one suspended
421 too. */
422 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
423 new_lwp->suspended = 1;
424
425 /* Normally we will get the pending SIGSTOP. But in some cases
426 we might get another signal delivered to the group first.
427 If we do get another signal, be sure not to lose it. */
428 if (WSTOPSIG (status) == SIGSTOP)
429 {
430 if (stopping_threads != NOT_STOPPING_THREADS)
431 new_lwp->stop_pc = get_stop_pc (new_lwp);
432 else
433 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
434 }
435 else
436 {
437 new_lwp->stop_expected = 1;
438
439 if (stopping_threads != NOT_STOPPING_THREADS)
440 {
441 new_lwp->stop_pc = get_stop_pc (new_lwp);
442 new_lwp->status_pending_p = 1;
443 new_lwp->status_pending = status;
444 }
445 else
446 /* Pass the signal on. This is what GDB does - except
447 shouldn't we really report it instead? */
448 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
449 }
450
451 /* Always resume the current thread. If we are stopping
452 threads, it will have a pending SIGSTOP; we may as well
453 collect it now. */
454 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
455 }
456 }
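/* A compact sketch of the status decoding handle_extended_wait
   relies on: extended ptrace events arrive as a SIGTRAP stop with
   the event code in bits 16 and up of the raw waitpid status.
   example_is_clone_event is a hypothetical helper.  */

static int
example_is_clone_event (int wstat)
{
  return (WIFSTOPPED (wstat)
	  && WSTOPSIG (wstat) == SIGTRAP
	  && (wstat >> 16) == PTRACE_EVENT_CLONE);
}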
457
458 /* Return the PC as read from the regcache of LWP, without any
459 adjustment. */
460
461 static CORE_ADDR
462 get_pc (struct lwp_info *lwp)
463 {
464 struct thread_info *saved_inferior;
465 struct regcache *regcache;
466 CORE_ADDR pc;
467
468 if (the_low_target.get_pc == NULL)
469 return 0;
470
471 saved_inferior = current_inferior;
472 current_inferior = get_lwp_thread (lwp);
473
474 regcache = get_thread_regcache (current_inferior, 1);
475 pc = (*the_low_target.get_pc) (regcache);
476
477 if (debug_threads)
478 debug_printf ("pc is 0x%lx\n", (long) pc);
479
480 current_inferior = saved_inferior;
481 return pc;
482 }
483
484 /* This function should only be called if LWP got a SIGTRAP.
485 The SIGTRAP could mean several things.
486
487 On i386, where decr_pc_after_break is non-zero:
488 If we were single-stepping this process using PTRACE_SINGLESTEP,
489 we will get only the one SIGTRAP (even if the instruction we
490 stepped over was a breakpoint). The value of $eip will be the
491 next instruction.
492 If we continue the process using PTRACE_CONT, we will get a
493 SIGTRAP when we hit a breakpoint. The value of $eip will be
494 the instruction after the breakpoint (i.e. needs to be
495 decremented). If we report the SIGTRAP to GDB, we must also
496 report the undecremented PC. If we cancel the SIGTRAP, we
497 must resume at the decremented PC.
498
499 (Presumably, not yet tested) On a non-decr_pc_after_break machine
500 with hardware or kernel single-step:
501 If we single-step over a breakpoint instruction, our PC will
502 point at the following instruction. If we continue and hit a
503 breakpoint instruction, our PC will point at the breakpoint
504 instruction. */
505
506 static CORE_ADDR
507 get_stop_pc (struct lwp_info *lwp)
508 {
509 CORE_ADDR stop_pc;
510
511 if (the_low_target.get_pc == NULL)
512 return 0;
513
514 stop_pc = get_pc (lwp);
515
516 if (WSTOPSIG (lwp->last_status) == SIGTRAP
517 && !lwp->stepping
518 && !lwp->stopped_by_watchpoint
519 && lwp->last_status >> 16 == 0)
520 stop_pc -= the_low_target.decr_pc_after_break;
521
522 if (debug_threads)
523 debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);
524
525 return stop_pc;
526 }
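/* Worked example of the adjustment above, assuming i386, where the
   software breakpoint is the one-byte int3 (0xcc) and
   decr_pc_after_break is 1: with a breakpoint planted at 0x401004,
   the kernel reports the SIGTRAP with PC = 0x401005, and get_stop_pc
   yields 0x401004 so the stop matches the breakpoint's address.  No
   rewind is applied for single-steps, watchpoints, or extended
   events, per the conditions checked above.  */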
527
528 static void *
529 add_lwp (ptid_t ptid)
530 {
531 struct lwp_info *lwp;
532
533 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
534 memset (lwp, 0, sizeof (*lwp));
535
536 lwp->head.id = ptid;
537
538 if (the_low_target.new_thread != NULL)
539 lwp->arch_private = the_low_target.new_thread ();
540
541 add_inferior_to_list (&all_lwps, &lwp->head);
542
543 return lwp;
544 }
545
546 /* Start an inferior process and return its pid.
547 ALLARGS is a vector of program-name and args. */
548
549 static int
550 linux_create_inferior (char *program, char **allargs)
551 {
552 #ifdef HAVE_PERSONALITY
553 int personality_orig = 0, personality_set = 0;
554 #endif
555 struct lwp_info *new_lwp;
556 int pid;
557 ptid_t ptid;
558
559 #ifdef HAVE_PERSONALITY
560 if (disable_randomization)
561 {
562 errno = 0;
563 personality_orig = personality (0xffffffff);
564 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
565 {
566 personality_set = 1;
567 personality (personality_orig | ADDR_NO_RANDOMIZE);
568 }
569 if (errno != 0 || (personality_set
570 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
571 warning ("Error disabling address space randomization: %s",
572 strerror (errno));
573 }
574 #endif
575
576 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
577 pid = vfork ();
578 #else
579 pid = fork ();
580 #endif
581 if (pid < 0)
582 perror_with_name ("fork");
583
584 if (pid == 0)
585 {
586 close_most_fds ();
587 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
588
589 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
590 signal (__SIGRTMIN + 1, SIG_DFL);
591 #endif
592
593 setpgid (0, 0);
594
595 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
596 stdout to stderr so that inferior i/o doesn't corrupt the connection.
597 Also, redirect stdin to /dev/null. */
598 if (remote_connection_is_stdio ())
599 {
600 close (0);
601 open ("/dev/null", O_RDONLY);
602 dup2 (2, 1);
603 if (write (2, "stdin/stdout redirected\n",
604 sizeof ("stdin/stdout redirected\n") - 1) < 0)
605 {
606 /* Errors ignored. */;
607 }
608 }
609
610 execv (program, allargs);
611 if (errno == ENOENT)
612 execvp (program, allargs);
613
614 fprintf (stderr, "Cannot exec %s: %s.\n", program,
615 strerror (errno));
616 fflush (stderr);
617 _exit (0177);
618 }
619
620 #ifdef HAVE_PERSONALITY
621 if (personality_set)
622 {
623 errno = 0;
624 personality (personality_orig);
625 if (errno != 0)
626 warning ("Error restoring address space randomization: %s",
627 strerror (errno));
628 }
629 #endif
630
631 linux_add_process (pid, 0);
632
633 ptid = ptid_build (pid, pid, 0);
634 new_lwp = add_lwp (ptid);
635 add_thread (ptid, new_lwp);
636 new_lwp->must_set_ptrace_flags = 1;
637
638 return pid;
639 }
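/* The personality(2) dance above, shown in isolation under the same
   HAVE_PERSONALITY assumption; example_disable_aslr is a
   hypothetical helper.  The caller restores the returned persona
   with personality (orig) once the child has been forked.  */

#ifdef HAVE_PERSONALITY
static int
example_disable_aslr (void)
{
  int orig = personality (0xffffffff);	/* Query-only call.  */

  if (orig == -1)
    return -1;
  if (personality (orig | ADDR_NO_RANDOMIZE) == -1)
    return -1;
  return orig;
}
#endif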
640
641 /* Attach to an inferior process. */
642
643 static void
644 linux_attach_lwp_1 (unsigned long lwpid, int initial)
645 {
646 ptid_t ptid;
647 struct lwp_info *new_lwp;
648
649 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
650 != 0)
651 {
652 struct buffer buffer;
653
654 if (!initial)
655 {
656 /* If we fail to attach to an LWP, just warn. */
657 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
658 strerror (errno), errno);
659 fflush (stderr);
660 return;
661 }
662
663 /* If we fail to attach to a process, report an error. */
664 buffer_init (&buffer);
665 linux_ptrace_attach_warnings (lwpid, &buffer);
666 buffer_grow_str0 (&buffer, "");
667 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
668 lwpid, strerror (errno), errno);
669 }
670
671 if (initial)
672 /* If lwp is the tgid, we handle adding existing threads later.
673 Otherwise we just add lwp without bothering about any other
674 threads. */
675 ptid = ptid_build (lwpid, lwpid, 0);
676 else
677 {
678 /* Note that extracting the pid from the current inferior is
679 safe, since we're always called in the context of the same
680 process as this new thread. */
681 int pid = pid_of (get_thread_lwp (current_inferior));
682 ptid = ptid_build (pid, lwpid, 0);
683 }
684
685 new_lwp = (struct lwp_info *) add_lwp (ptid);
686 add_thread (ptid, new_lwp);
687
688 /* We need to wait for SIGSTOP before being able to make the next
689 ptrace call on this LWP. */
690 new_lwp->must_set_ptrace_flags = 1;
691
692 if (linux_proc_pid_is_stopped (lwpid))
693 {
694 if (debug_threads)
695 debug_printf ("Attached to a stopped process\n");
696
697 /* The process is definitely stopped. It is in a job control
698 stop, unless the kernel predates the TASK_STOPPED /
699 TASK_TRACED distinction, in which case it might be in a
700 ptrace stop. Make sure it is in a ptrace stop; from there we
701 can kill it, signal it, et cetera.
702
703 First make sure there is a pending SIGSTOP. Since we are
704 already attached, the process can not transition from stopped
705 to running without a PTRACE_CONT; so we know this signal will
706 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
707 probably already in the queue (unless this kernel is old
708 enough to use TASK_STOPPED for ptrace stops); but since
709 SIGSTOP is not an RT signal, it can only be queued once. */
710 kill_lwp (lwpid, SIGSTOP);
711
712 /* Finally, resume the stopped process. This will deliver the
713 SIGSTOP (or a higher priority signal, just like normal
714 PTRACE_ATTACH), which we'll catch later on. */
715 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
716 }
717
718 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
719 brings it to a halt.
720
721 There are several cases to consider here:
722
723 1) gdbserver has already attached to the process and is being notified
724 of a new thread that is being created.
725 In this case we should ignore that SIGSTOP and resume the
726 process. This is handled below by setting stop_expected = 1,
727 and the fact that add_thread sets last_resume_kind ==
728 resume_continue.
729
730 2) This is the first thread (the process thread), and we're attaching
731 to it via attach_inferior.
732 In this case we want the process thread to stop.
733 This is handled by having linux_attach set last_resume_kind ==
734 resume_stop after we return.
735
736 If the pid we are attaching to is also the tgid, we attach to and
737 stop all the existing threads. Otherwise, we attach to pid and
738 ignore any other threads in the same group as this pid.
739
740 3) GDB is connecting to gdbserver and is requesting an enumeration of all
741 existing threads.
742 In this case we want the thread to stop.
743 FIXME: This case is currently not properly handled.
744 We should wait for the SIGSTOP but don't. Things work apparently
745 because enough time passes between when we ptrace (ATTACH) and when
746 gdb makes the next ptrace call on the thread.
747
748 On the other hand, if we are currently trying to stop all threads, we
749 should treat the new thread as if we had sent it a SIGSTOP. This works
750 because we are guaranteed that the add_lwp call above added us to the
751 end of the list, and so the new thread has not yet reached
752 wait_for_sigstop (but will). */
753 new_lwp->stop_expected = 1;
754 }
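/* The attach-and-sync pattern above, reduced to its core for
   illustration.  example_attach_and_sync is a hypothetical helper;
   the real code above deliberately defers the wait, as its comments
   explain.  */

static int
example_attach_and_sync (unsigned long lwpid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return -1;

  /* PTRACE_ATTACH queues a SIGSTOP; consume it so that subsequent
     ptrace requests on this LWP are legal.  */
  if (my_waitpid (lwpid, &status, __WALL) != (int) lwpid
      || !WIFSTOPPED (status))
    return -1;

  return 0;
}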
755
756 void
757 linux_attach_lwp (unsigned long lwpid)
758 {
759 linux_attach_lwp_1 (lwpid, 0);
760 }
761
762 /* Attach to PID. If PID is the tgid, attach to it and all
763 of its threads. */
764
765 static int
766 linux_attach (unsigned long pid)
767 {
768 /* Attach to PID. We will check for other threads
769 soon. */
770 linux_attach_lwp_1 (pid, 1);
771 linux_add_process (pid, 1);
772
773 if (!non_stop)
774 {
775 struct thread_info *thread;
776
777 /* Don't ignore the initial SIGSTOP if we just attached to this
778 process. It will be collected by wait shortly. */
779 thread = find_thread_ptid (ptid_build (pid, pid, 0));
780 thread->last_resume_kind = resume_stop;
781 }
782
783 if (linux_proc_get_tgid (pid) == pid)
784 {
785 DIR *dir;
786 char pathname[128];
787
788 sprintf (pathname, "/proc/%ld/task", pid);
789
790 dir = opendir (pathname);
791
792 if (!dir)
793 {
794 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
795 fflush (stderr);
796 }
797 else
798 {
799 /* At this point we attached to the tgid. Scan the task for
800 existing threads. */
801 unsigned long lwp;
802 int new_threads_found;
803 int iterations = 0;
804 struct dirent *dp;
805
806 while (iterations < 2)
807 {
808 new_threads_found = 0;
809 /* Add all the other threads. While we go through the
810 threads, new threads may be spawned. Cycle through
811 the list of threads until we have done two iterations without
812 finding new threads. */
813 while ((dp = readdir (dir)) != NULL)
814 {
815 /* Fetch one lwp. */
816 lwp = strtoul (dp->d_name, NULL, 10);
817
818 /* Is this a new thread? */
819 if (lwp
820 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
821 {
822 linux_attach_lwp_1 (lwp, 0);
823 new_threads_found++;
824
825 if (debug_threads)
826 debug_printf ("Found and attached to new lwp %ld\n",
827 lwp);
828 }
829 }
830
831 if (!new_threads_found)
832 iterations++;
833 else
834 iterations = 0;
835
836 rewinddir (dir);
837 }
838 closedir (dir);
839 }
840 }
841
842 return 0;
843 }
844
845 struct counter
846 {
847 int pid;
848 int count;
849 };
850
851 static int
852 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
853 {
854 struct counter *counter = args;
855
856 if (ptid_get_pid (entry->id) == counter->pid)
857 {
858 if (++counter->count > 1)
859 return 1;
860 }
861
862 return 0;
863 }
864
865 static int
866 last_thread_of_process_p (struct thread_info *thread)
867 {
868 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
869 int pid = ptid_get_pid (ptid);
870 struct counter counter = { pid, 0 };
871
872 return (find_inferior (&all_threads,
873 second_thread_of_pid_p, &counter) == NULL);
874 }
875
876 /* Kill LWP. */
877
878 static void
879 linux_kill_one_lwp (struct lwp_info *lwp)
880 {
881 int pid = lwpid_of (lwp);
882
883 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
884 there is no signal context, and ptrace(PTRACE_KILL) (or
885 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
886 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
887 alternative is to kill with SIGKILL. We only need one SIGKILL
888 per process, not one for each thread. But since we still support
889 linuxthreads, and we also support debugging programs using raw
890 clone without CLONE_THREAD, we send one for each thread. For
891 years, we used PTRACE_KILL only, so we're being a bit paranoid
892 about some old kernels where PTRACE_KILL might work better
893 (dubious if there are any such, but that's why it's paranoia), so
894 we try SIGKILL first, PTRACE_KILL second, and so we're fine
895 everywhere. */
896
897 errno = 0;
898 kill (pid, SIGKILL);
899 if (debug_threads)
900 debug_printf ("LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
901 target_pid_to_str (ptid_of (lwp)),
902 errno ? strerror (errno) : "OK");
903
904 errno = 0;
905 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
906 if (debug_threads)
907 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
908 target_pid_to_str (ptid_of (lwp)),
909 errno ? strerror (errno) : "OK");
910 }
911
912 /* Callback for `find_inferior'. Kills an lwp of a given process,
913 except the leader. */
914
915 static int
916 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
917 {
918 struct thread_info *thread = (struct thread_info *) entry;
919 struct lwp_info *lwp = get_thread_lwp (thread);
920 int wstat;
921 int pid = * (int *) args;
922
923 if (ptid_get_pid (entry->id) != pid)
924 return 0;
925
926 /* We avoid killing the first thread here, because of a Linux kernel (at
927 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
928 the children get a chance to be reaped, it will remain a zombie
929 forever. */
930
931 if (lwpid_of (lwp) == pid)
932 {
933 if (debug_threads)
934 debug_printf ("lkop: is last of process %s\n",
935 target_pid_to_str (entry->id));
936 return 0;
937 }
938
939 do
940 {
941 linux_kill_one_lwp (lwp);
942
943 /* Make sure it died. The loop is most likely unnecessary. */
944 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
945 } while (pid > 0 && WIFSTOPPED (wstat));
946
947 return 0;
948 }
949
950 static int
951 linux_kill (int pid)
952 {
953 struct process_info *process;
954 struct lwp_info *lwp;
955 int wstat;
956 int lwpid;
957
958 process = find_process_pid (pid);
959 if (process == NULL)
960 return -1;
961
962 /* If we're killing a running inferior, make sure it is stopped
963 first, as PTRACE_KILL will not work otherwise. */
964 stop_all_lwps (0, NULL);
965
966 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
967
968 /* See the comment in linux_kill_one_lwp. We did not kill the first
969 thread in the list, so do so now. */
970 lwp = find_lwp_pid (pid_to_ptid (pid));
971
972 if (lwp == NULL)
973 {
974 if (debug_threads)
975 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
976 pid);
977 }
978 else
979 {
980 if (debug_threads)
981 debug_printf ("lk_1: killing lwp %ld, for pid: %d\n",
982 lwpid_of (lwp), pid);
983
984 do
985 {
986 linux_kill_one_lwp (lwp);
987
988 /* Make sure it died. The loop is most likely unnecessary. */
989 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
990 } while (lwpid > 0 && WIFSTOPPED (wstat));
991 }
992
993 the_target->mourn (process);
994
995 /* Since we presently can only stop all lwps of all processes, we
996 need to unstop lwps of other processes. */
997 unstop_all_lwps (0, NULL);
998 return 0;
999 }
1000
1001 /* Get pending signal of THREAD, for detaching purposes. This is the
1002 signal the thread last stopped for, which we need to deliver to the
1003 thread when detaching, otherwise, it'd be suppressed/lost. */
1004
1005 static int
1006 get_detach_signal (struct thread_info *thread)
1007 {
1008 enum gdb_signal signo = GDB_SIGNAL_0;
1009 int status;
1010 struct lwp_info *lp = get_thread_lwp (thread);
1011
1012 if (lp->status_pending_p)
1013 status = lp->status_pending;
1014 else
1015 {
1016 /* If the thread had been suspended by gdbserver, and it stopped
1017 cleanly, then it'll have stopped with SIGSTOP. But we don't
1018 want to deliver that SIGSTOP. */
1019 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1020 || thread->last_status.value.sig == GDB_SIGNAL_0)
1021 return 0;
1022
1023 /* Otherwise, we may need to deliver the signal we
1024 intercepted. */
1025 status = lp->last_status;
1026 }
1027
1028 if (!WIFSTOPPED (status))
1029 {
1030 if (debug_threads)
1031 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1032 target_pid_to_str (ptid_of (lp)));
1033 return 0;
1034 }
1035
1036 /* Extended wait statuses aren't real SIGTRAPs. */
1037 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1038 {
1039 if (debug_threads)
1040 debug_printf ("GPS: lwp %s had stopped with extended "
1041 "status: no pending signal\n",
1042 target_pid_to_str (ptid_of (lp)));
1043 return 0;
1044 }
1045
1046 signo = gdb_signal_from_host (WSTOPSIG (status));
1047
1048 if (program_signals_p && !program_signals[signo])
1049 {
1050 if (debug_threads)
1051 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1052 target_pid_to_str (ptid_of (lp)),
1053 gdb_signal_to_string (signo));
1054 return 0;
1055 }
1056 else if (!program_signals_p
1057 /* If we have no way to know which signals GDB does not
1058 want to have passed to the program, assume
1059 SIGTRAP/SIGINT, which is GDB's default. */
1060 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1061 {
1062 if (debug_threads)
1063 debug_printf ("GPS: lwp %s had signal %s, "
1064 "but we don't know if we should pass it. "
1065 "Default to not.\n",
1066 target_pid_to_str (ptid_of (lp)),
1067 gdb_signal_to_string (signo));
1068 return 0;
1069 }
1070 else
1071 {
1072 if (debug_threads)
1073 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1074 target_pid_to_str (ptid_of (lp)),
1075 gdb_signal_to_string (signo));
1076
1077 return WSTOPSIG (status);
1078 }
1079 }
1080
1081 static int
1082 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1083 {
1084 struct thread_info *thread = (struct thread_info *) entry;
1085 struct lwp_info *lwp = get_thread_lwp (thread);
1086 int pid = * (int *) args;
1087 int sig;
1088
1089 if (ptid_get_pid (entry->id) != pid)
1090 return 0;
1091
1092 /* If there is a pending SIGSTOP, get rid of it. */
1093 if (lwp->stop_expected)
1094 {
1095 if (debug_threads)
1096 debug_printf ("Sending SIGCONT to %s\n",
1097 target_pid_to_str (ptid_of (lwp)));
1098
1099 kill_lwp (lwpid_of (lwp), SIGCONT);
1100 lwp->stop_expected = 0;
1101 }
1102
1103 /* Flush any pending changes to the process's registers. */
1104 regcache_invalidate_thread (get_lwp_thread (lwp));
1105
1106 /* Pass on any pending signal for this thread. */
1107 sig = get_detach_signal (thread);
1108
1109 /* Finally, let it resume. */
1110 if (the_low_target.prepare_to_resume != NULL)
1111 the_low_target.prepare_to_resume (lwp);
1112 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1113 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1114 error (_("Can't detach %s: %s"),
1115 target_pid_to_str (ptid_of (lwp)),
1116 strerror (errno));
1117
1118 delete_lwp (lwp);
1119 return 0;
1120 }
1121
1122 static int
1123 linux_detach (int pid)
1124 {
1125 struct process_info *process;
1126
1127 process = find_process_pid (pid);
1128 if (process == NULL)
1129 return -1;
1130
1131 /* Stop all threads before detaching. First, ptrace requires that
1132 the thread is stopped to successfully detach. Second, thread_db
1133 may need to uninstall thread event breakpoints from memory, which
1134 only works with a stopped process anyway. */
1135 stop_all_lwps (0, NULL);
1136
1137 #ifdef USE_THREAD_DB
1138 thread_db_detach (process);
1139 #endif
1140
1141 /* Stabilize threads (move out of jump pads). */
1142 stabilize_threads ();
1143
1144 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1145
1146 the_target->mourn (process);
1147
1148 /* Since we presently can only stop all lwps of all processes, we
1149 need to unstop lwps of other processes. */
1150 unstop_all_lwps (0, NULL);
1151 return 0;
1152 }
1153
1154 /* Remove all LWPs that belong to process PROC from the lwp list. */
1155
1156 static int
1157 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1158 {
1159 struct lwp_info *lwp = (struct lwp_info *) entry;
1160 struct process_info *process = proc;
1161
1162 if (pid_of (lwp) == pid_of (process))
1163 delete_lwp (lwp);
1164
1165 return 0;
1166 }
1167
1168 static void
1169 linux_mourn (struct process_info *process)
1170 {
1171 struct process_info_private *priv;
1172
1173 #ifdef USE_THREAD_DB
1174 thread_db_mourn (process);
1175 #endif
1176
1177 find_inferior (&all_lwps, delete_lwp_callback, process);
1178
1179 /* Free all private data. */
1180 priv = process->private;
1181 free (priv->arch_private);
1182 free (priv);
1183 process->private = NULL;
1184
1185 remove_process (process);
1186 }
1187
1188 static void
1189 linux_join (int pid)
1190 {
1191 int status, ret;
1192
1193 do {
1194 ret = my_waitpid (pid, &status, 0);
1195 if (WIFEXITED (status) || WIFSIGNALED (status))
1196 break;
1197 } while (ret != -1 || errno != ECHILD);
1198 }
1199
1200 /* Return nonzero if the given thread is still alive. */
1201 static int
1202 linux_thread_alive (ptid_t ptid)
1203 {
1204 struct lwp_info *lwp = find_lwp_pid (ptid);
1205
1206 /* We assume we always know if a thread exits. If a whole process
1207 exited but we still haven't been able to report it to GDB, we'll
1208 hold on to the last lwp of the dead process. */
1209 if (lwp != NULL)
1210 return !lwp->dead;
1211 else
1212 return 0;
1213 }
1214
1215 /* Return 1 if this lwp has an interesting status pending. */
1216 static int
1217 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1218 {
1219 struct lwp_info *lwp = (struct lwp_info *) entry;
1220 ptid_t ptid = * (ptid_t *) arg;
1221 struct thread_info *thread;
1222
1223 /* Check if we're only interested in events from a specific process
1224 or its lwps. */
1225 if (!ptid_equal (minus_one_ptid, ptid)
1226 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1227 return 0;
1228
1229 thread = get_lwp_thread (lwp);
1230
1231 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1232 report any status pending the LWP may have. */
1233 if (thread->last_resume_kind == resume_stop
1234 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1235 return 0;
1236
1237 return lwp->status_pending_p;
1238 }
1239
1240 static int
1241 same_lwp (struct inferior_list_entry *entry, void *data)
1242 {
1243 ptid_t ptid = *(ptid_t *) data;
1244 int lwp;
1245
1246 if (ptid_get_lwp (ptid) != 0)
1247 lwp = ptid_get_lwp (ptid);
1248 else
1249 lwp = ptid_get_pid (ptid);
1250
1251 if (ptid_get_lwp (entry->id) == lwp)
1252 return 1;
1253
1254 return 0;
1255 }
1256
1257 struct lwp_info *
1258 find_lwp_pid (ptid_t ptid)
1259 {
1260 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1261 }
1262
1263 static struct lwp_info *
1264 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1265 {
1266 int ret;
1267 int to_wait_for = -1;
1268 struct lwp_info *child = NULL;
1269
1270 if (debug_threads)
1271 debug_printf ("linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1272
1273 if (ptid_equal (ptid, minus_one_ptid))
1274 to_wait_for = -1; /* any child */
1275 else
1276 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1277
1278 options |= __WALL;
1279
1280 retry:
1281
1282 ret = my_waitpid (to_wait_for, wstatp, options);
1283 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1284 return NULL;
1285 else if (ret == -1)
1286 perror_with_name ("waitpid");
1287
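  /* In the filter below, WSTOPSIG values 32 and 33 are the thread
     library's internal real-time signals, which are too noisy to be
     worth logging.  */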
1288 if (debug_threads
1289 && (!WIFSTOPPED (*wstatp)
1290 || (WSTOPSIG (*wstatp) != 32
1291 && WSTOPSIG (*wstatp) != 33)))
1292 debug_printf ("Got an event from %d (%x)\n", ret, *wstatp);
1293
1294 child = find_lwp_pid (pid_to_ptid (ret));
1295
1296 /* If we didn't find a process, one of two things presumably happened:
1297 - A process we started and then detached from has exited. Ignore it.
1298 - A process we are controlling has forked and the new child's stop
1299 was reported to us by the kernel. Save its PID. */
1300 if (child == NULL && WIFSTOPPED (*wstatp))
1301 {
1302 add_to_pid_list (&stopped_pids, ret, *wstatp);
1303 goto retry;
1304 }
1305 else if (child == NULL)
1306 goto retry;
1307
1308 child->stopped = 1;
1309
1310 child->last_status = *wstatp;
1311
1312 if (WIFSTOPPED (*wstatp))
1313 {
1314 struct process_info *proc;
1315
1316 /* Architecture-specific setup after inferior is running. This
1317 needs to happen after we have attached to the inferior and it
1318 is stopped for the first time, but before we access any
1319 inferior registers. */
1320 proc = find_process_pid (pid_of (child));
1321 if (proc->private->new_inferior)
1322 {
1323 struct thread_info *saved_inferior;
1324
1325 saved_inferior = current_inferior;
1326 current_inferior = get_lwp_thread (child);
1327
1328 the_low_target.arch_setup ();
1329
1330 current_inferior = saved_inferior;
1331
1332 proc->private->new_inferior = 0;
1333 }
1334 }
1335
1336 /* Fetch the possibly triggered data watchpoint info and store it in
1337 CHILD.
1338
1339 On some archs, like x86, that use debug registers to set
1340 watchpoints, it's possible that the way to know which watched
1341 address trapped, is to check the register that is used to select
1342 which address to watch. Problem is, between setting the
1343 watchpoint and reading back which data address trapped, the user
1344 may change the set of watchpoints, and, as a consequence, GDB
1345 changes the debug registers in the inferior. To avoid reading
1346 back a stale stopped-data-address when that happens, we cache in
1347 LP the fact that a watchpoint trapped, and the corresponding data
1348 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1349 changes the debug registers meanwhile, we have the cached data we
1350 can rely on. */
1351
1352 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1353 {
1354 if (the_low_target.stopped_by_watchpoint == NULL)
1355 {
1356 child->stopped_by_watchpoint = 0;
1357 }
1358 else
1359 {
1360 struct thread_info *saved_inferior;
1361
1362 saved_inferior = current_inferior;
1363 current_inferior = get_lwp_thread (child);
1364
1365 child->stopped_by_watchpoint
1366 = the_low_target.stopped_by_watchpoint ();
1367
1368 if (child->stopped_by_watchpoint)
1369 {
1370 if (the_low_target.stopped_data_address != NULL)
1371 child->stopped_data_address
1372 = the_low_target.stopped_data_address ();
1373 else
1374 child->stopped_data_address = 0;
1375 }
1376
1377 current_inferior = saved_inferior;
1378 }
1379 }
1380
1381 /* Store the STOP_PC, with adjustment applied. This depends on the
1382 architecture being defined already (so that CHILD has a valid
1383 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1384 not). */
1385 if (WIFSTOPPED (*wstatp))
1386 child->stop_pc = get_stop_pc (child);
1387
1388 if (debug_threads
1389 && WIFSTOPPED (*wstatp)
1390 && the_low_target.get_pc != NULL)
1391 {
1392 struct thread_info *saved_inferior = current_inferior;
1393 struct regcache *regcache;
1394 CORE_ADDR pc;
1395
1396 current_inferior = get_lwp_thread (child);
1397 regcache = get_thread_regcache (current_inferior, 1);
1398 pc = (*the_low_target.get_pc) (regcache);
1399 debug_printf ("linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1400 current_inferior = saved_inferior;
1401 }
1402
1403 return child;
1404 }
1405
1406 /* This function should only be called if the LWP got a SIGTRAP.
1407
1408 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1409 event was handled, 0 otherwise. */
1410
1411 static int
1412 handle_tracepoints (struct lwp_info *lwp)
1413 {
1414 struct thread_info *tinfo = get_lwp_thread (lwp);
1415 int tpoint_related_event = 0;
1416
1417 /* If this tracepoint hit causes a tracing stop, we'll immediately
1418 uninsert tracepoints. To do this, we temporarily pause all
1419 threads, unpatch away, and then unpause threads. We need to make
1420 sure the unpausing doesn't resume LWP too. */
1421 lwp->suspended++;
1422
1423 /* And we need to be sure that any all-threads-stopping doesn't try
1424 to move threads out of the jump pads, as it could deadlock the
1425 inferior (LWP could be in the jump pad, maybe even holding the
1426 lock.) */
1427
1428 /* Do any necessary step collect actions. */
1429 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1430
1431 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1432
1433 /* See if we just hit a tracepoint and do its main collect
1434 actions. */
1435 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1436
1437 lwp->suspended--;
1438
1439 gdb_assert (lwp->suspended == 0);
1440 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1441
1442 if (tpoint_related_event)
1443 {
1444 if (debug_threads)
1445 debug_printf ("got a tracepoint event\n");
1446 return 1;
1447 }
1448
1449 return 0;
1450 }
1451
1452 /* Convenience wrapper. Returns true if LWP is presently collecting a
1453 fast tracepoint. */
1454
1455 static int
1456 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1457 struct fast_tpoint_collect_status *status)
1458 {
1459 CORE_ADDR thread_area;
1460
1461 if (the_low_target.get_thread_area == NULL)
1462 return 0;
1463
1464 /* Get the thread area address. This is used to recognize which
1465 thread is which when tracing with the in-process agent library.
1466 We don't read anything from the address, and treat it as opaque;
1467 it's the address itself that we assume is unique per-thread. */
1468 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1469 return 0;
1470
1471 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1472 }
1473
1474 /* The reason we resume in the caller is that we want to be able
1475 to pass lwp->status_pending as WSTAT, and we need to clear
1476 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1477 refuses to resume. */
1478
1479 static int
1480 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1481 {
1482 struct thread_info *saved_inferior;
1483
1484 saved_inferior = current_inferior;
1485 current_inferior = get_lwp_thread (lwp);
1486
1487 if ((wstat == NULL
1488 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1489 && supports_fast_tracepoints ()
1490 && agent_loaded_p ())
1491 {
1492 struct fast_tpoint_collect_status status;
1493 int r;
1494
1495 if (debug_threads)
1496 debug_printf ("Checking whether LWP %ld needs to move out of the "
1497 "jump pad.\n",
1498 lwpid_of (lwp));
1499
1500 r = linux_fast_tracepoint_collecting (lwp, &status);
1501
1502 if (wstat == NULL
1503 || (WSTOPSIG (*wstat) != SIGILL
1504 && WSTOPSIG (*wstat) != SIGFPE
1505 && WSTOPSIG (*wstat) != SIGSEGV
1506 && WSTOPSIG (*wstat) != SIGBUS))
1507 {
1508 lwp->collecting_fast_tracepoint = r;
1509
1510 if (r != 0)
1511 {
1512 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1513 {
1514 /* Haven't executed the original instruction yet.
1515 Set breakpoint there, and wait till it's hit,
1516 then single-step until exiting the jump pad. */
1517 lwp->exit_jump_pad_bkpt
1518 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1519 }
1520
1521 if (debug_threads)
1522 debug_printf ("Checking whether LWP %ld needs to move out of "
1523 "the jump pad...it does\n",
1524 lwpid_of (lwp));
1525 current_inferior = saved_inferior;
1526
1527 return 1;
1528 }
1529 }
1530 else
1531 {
1532 /* If we get a synchronous signal while collecting, *and*
1533 while executing the (relocated) original instruction,
1534 reset the PC to point at the tpoint address, before
1535 reporting to GDB. Otherwise, it's an IPA lib bug: just
1536 report the signal to GDB, and pray for the best. */
1537
1538 lwp->collecting_fast_tracepoint = 0;
1539
1540 if (r != 0
1541 && (status.adjusted_insn_addr <= lwp->stop_pc
1542 && lwp->stop_pc < status.adjusted_insn_addr_end))
1543 {
1544 siginfo_t info;
1545 struct regcache *regcache;
1546
1547 /* The si_addr on a few signals references the address
1548 of the faulting instruction. Adjust that as
1549 well. */
1550 if ((WSTOPSIG (*wstat) == SIGILL
1551 || WSTOPSIG (*wstat) == SIGFPE
1552 || WSTOPSIG (*wstat) == SIGBUS
1553 || WSTOPSIG (*wstat) == SIGSEGV)
1554 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
1555 (PTRACE_TYPE_ARG3) 0, &info) == 0
1556 /* Final check just to make sure we don't clobber
1557 the siginfo of non-kernel-sent signals. */
1558 && (uintptr_t) info.si_addr == lwp->stop_pc)
1559 {
1560 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1561 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
1562 (PTRACE_TYPE_ARG3) 0, &info);
1563 }
1564
1565 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1566 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1567 lwp->stop_pc = status.tpoint_addr;
1568
1569 /* Cancel any fast tracepoint lock this thread was
1570 holding. */
1571 force_unlock_trace_buffer ();
1572 }
1573
1574 if (lwp->exit_jump_pad_bkpt != NULL)
1575 {
1576 if (debug_threads)
1577 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1578 "stopping all threads momentarily.\n");
1579
1580 stop_all_lwps (1, lwp);
1581 cancel_breakpoints ();
1582
1583 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1584 lwp->exit_jump_pad_bkpt = NULL;
1585
1586 unstop_all_lwps (1, lwp);
1587
1588 gdb_assert (lwp->suspended >= 0);
1589 }
1590 }
1591 }
1592
1593 if (debug_threads)
1594 debug_printf ("Checking whether LWP %ld needs to move out of the "
1595 "jump pad...no\n",
1596 lwpid_of (lwp));
1597
1598 current_inferior = saved_inferior;
1599 return 0;
1600 }
1601
1602 /* Enqueue one signal in the "signals to report later when out of the
1603 jump pad" list. */
1604
1605 static void
1606 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1607 {
1608 struct pending_signals *p_sig;
1609
1610 if (debug_threads)
1611 debug_printf ("Deferring signal %d for LWP %ld.\n",
1612 WSTOPSIG (*wstat), lwpid_of (lwp));
1613
1614 if (debug_threads)
1615 {
1616 struct pending_signals *sig;
1617
1618 for (sig = lwp->pending_signals_to_report;
1619 sig != NULL;
1620 sig = sig->prev)
1621 debug_printf (" Already queued %d\n",
1622 sig->signal);
1623
1624 debug_printf (" (no more currently queued signals)\n");
1625 }
1626
1627 /* Don't enqueue non-RT signals if they are already in the deferred
1628 queue. (SIGSTOP being the easiest signal to see ending up here
1629 twice) */
1630 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1631 {
1632 struct pending_signals *sig;
1633
1634 for (sig = lwp->pending_signals_to_report;
1635 sig != NULL;
1636 sig = sig->prev)
1637 {
1638 if (sig->signal == WSTOPSIG (*wstat))
1639 {
1640 if (debug_threads)
1641 debug_printf ("Not requeuing already queued non-RT signal %d"
1642 " for LWP %ld\n",
1643 sig->signal,
1644 lwpid_of (lwp));
1645 return;
1646 }
1647 }
1648 }
1649
1650 p_sig = xmalloc (sizeof (*p_sig));
1651 p_sig->prev = lwp->pending_signals_to_report;
1652 p_sig->signal = WSTOPSIG (*wstat);
1653 memset (&p_sig->info, 0, sizeof (siginfo_t));
1654 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1655 &p_sig->info);
1656
1657 lwp->pending_signals_to_report = p_sig;
1658 }
1659
1660 /* Dequeue one signal from the "signals to report later when out of
1661 the jump pad" list. */
1662
1663 static int
1664 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1665 {
1666 if (lwp->pending_signals_to_report != NULL)
1667 {
1668 struct pending_signals **p_sig;
1669
1670 p_sig = &lwp->pending_signals_to_report;
1671 while ((*p_sig)->prev != NULL)
1672 p_sig = &(*p_sig)->prev;
1673
1674 *wstat = W_STOPCODE ((*p_sig)->signal);
1675 if ((*p_sig)->info.si_signo != 0)
1676 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1677 &(*p_sig)->info);
1678 free (*p_sig);
1679 *p_sig = NULL;
1680
1681 if (debug_threads)
1682 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1683 WSTOPSIG (*wstat), lwpid_of (lwp));
1684
1685 if (debug_threads)
1686 {
1687 struct pending_signals *sig;
1688
1689 for (sig = lwp->pending_signals_to_report;
1690 sig != NULL;
1691 sig = sig->prev)
1692 debug_printf (" Still queued %d\n",
1693 sig->signal);
1694
1695 debug_printf (" (no more queued signals)\n");
1696 }
1697
1698 return 1;
1699 }
1700
1701 return 0;
1702 }
1703
1704 /* Arrange for a breakpoint to be hit again later. We don't keep the
1705 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1706 will handle the current event, eventually we will resume this LWP,
1707 and this breakpoint will trap again. */
1708
1709 static int
1710 cancel_breakpoint (struct lwp_info *lwp)
1711 {
1712 struct thread_info *saved_inferior;
1713
1714 /* There's nothing to do if we don't support breakpoints. */
1715 if (!supports_breakpoints ())
1716 return 0;
1717
1718 /* breakpoint_at reads from current inferior. */
1719 saved_inferior = current_inferior;
1720 current_inferior = get_lwp_thread (lwp);
1721
1722 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1723 {
1724 if (debug_threads)
1725 debug_printf ("CB: Push back breakpoint for %s\n",
1726 target_pid_to_str (ptid_of (lwp)));
1727
1728 /* Back up the PC if necessary. */
1729 if (the_low_target.decr_pc_after_break)
1730 {
1731 struct regcache *regcache
1732 = get_thread_regcache (current_inferior, 1);
1733 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1734 }
1735
1736 current_inferior = saved_inferior;
1737 return 1;
1738 }
1739 else
1740 {
1741 if (debug_threads)
1742 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1743 paddress (lwp->stop_pc),
1744 target_pid_to_str (ptid_of (lwp)));
1745 }
1746
1747 current_inferior = saved_inferior;
1748 return 0;
1749 }
1750
1751 /* When the event-loop is doing a step-over, this points at the thread
1752 being stepped. */
1753 ptid_t step_over_bkpt;
1754
1755 /* Wait for an event from child PID. If PID is -1, wait for any
1756 child. Store the stop status through the status pointer WSTAT.
1757 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1758 event was found and OPTIONS contains WNOHANG. Return the PID of
1759 the stopped child otherwise. */
1760
1761 static int
1762 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1763 {
1764 struct lwp_info *event_child, *requested_child;
1765 ptid_t wait_ptid;
1766
1767 event_child = NULL;
1768 requested_child = NULL;
1769
1770 /* Check for a lwp with a pending status. */
1771
1772 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1773 {
1774 event_child = (struct lwp_info *)
1775 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1776 if (debug_threads && event_child)
1777 debug_printf ("Got a pending child %ld\n", lwpid_of (event_child));
1778 }
1779 else
1780 {
1781 requested_child = find_lwp_pid (ptid);
1782
1783 if (stopping_threads == NOT_STOPPING_THREADS
1784 && requested_child->status_pending_p
1785 && requested_child->collecting_fast_tracepoint)
1786 {
1787 enqueue_one_deferred_signal (requested_child,
1788 &requested_child->status_pending);
1789 requested_child->status_pending_p = 0;
1790 requested_child->status_pending = 0;
1791 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1792 }
1793
1794 if (requested_child->suspended
1795 && requested_child->status_pending_p)
1796 fatal ("requesting an event out of a suspended child?");
1797
1798 if (requested_child->status_pending_p)
1799 event_child = requested_child;
1800 }
1801
1802 if (event_child != NULL)
1803 {
1804 if (debug_threads)
1805 debug_printf ("Got an event from pending child %ld (%04x)\n",
1806 lwpid_of (event_child), event_child->status_pending);
1807 *wstat = event_child->status_pending;
1808 event_child->status_pending_p = 0;
1809 event_child->status_pending = 0;
1810 current_inferior = get_lwp_thread (event_child);
1811 return lwpid_of (event_child);
1812 }
1813
1814 if (ptid_is_pid (ptid))
1815 {
1816 /* A request to wait for a specific tgid. This is not possible
1817 with waitpid, so instead, we wait for any child, and leave
1818 children we're not interested in right now with a pending
1819 status to report later. */
1820 wait_ptid = minus_one_ptid;
1821 }
1822 else
1823 wait_ptid = ptid;
1824
1825 /* We only enter this loop if no process has a pending wait status. Thus
1826 any action taken in response to a wait status inside this loop is
1827 responding as soon as we detect the status, not after any pending
1828 events. */
1829 while (1)
1830 {
1831 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1832
1833 if ((options & WNOHANG) && event_child == NULL)
1834 {
1835 if (debug_threads)
1836 debug_printf ("WNOHANG set, no event found\n");
1837 return 0;
1838 }
1839
1840 if (event_child == NULL)
1841 error ("event from unknown child");
1842
1843 if (ptid_is_pid (ptid)
1844 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1845 {
1846 if (! WIFSTOPPED (*wstat))
1847 mark_lwp_dead (event_child, *wstat);
1848 else
1849 {
1850 event_child->status_pending_p = 1;
1851 event_child->status_pending = *wstat;
1852 }
1853 continue;
1854 }
1855
1856 current_inferior = get_lwp_thread (event_child);
1857
1858 /* Check for thread exit. */
1859 if (! WIFSTOPPED (*wstat))
1860 {
1861 if (debug_threads)
1862 debug_printf ("LWP %ld exiting\n", lwpid_of (event_child));
1863
1864 /* If the last thread is exiting, just return. */
1865 if (last_thread_of_process_p (current_inferior))
1866 {
1867 if (debug_threads)
1868 debug_printf ("LWP %ld is last lwp of process\n",
1869 lwpid_of (event_child));
1870 return lwpid_of (event_child);
1871 }
1872
1873 if (!non_stop)
1874 {
1875 current_inferior = (struct thread_info *) all_threads.head;
1876 if (debug_threads)
1877 debug_printf ("Current inferior is now %ld\n",
1878 lwpid_of (get_thread_lwp (current_inferior)));
1879 }
1880 else
1881 {
1882 current_inferior = NULL;
1883 if (debug_threads)
1884 debug_printf ("Current inferior is now <NULL>\n");
1885 }
1886
1887 /* If we were waiting for this particular child to do something...
1888 well, it did something. */
1889 if (requested_child != NULL)
1890 {
1891 int lwpid = lwpid_of (event_child);
1892
1893 /* Cancel the step-over operation --- the thread that
1894 started it is gone. */
1895 if (finish_step_over (event_child))
1896 unstop_all_lwps (1, event_child);
1897 delete_lwp (event_child);
1898 return lwpid;
1899 }
1900
1901 delete_lwp (event_child);
1902
1903 /* Wait for a more interesting event. */
1904 continue;
1905 }
1906
1907 if (event_child->must_set_ptrace_flags)
1908 {
1909 linux_enable_event_reporting (lwpid_of (event_child));
1910 event_child->must_set_ptrace_flags = 0;
1911 }
1912
1913 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1914 && *wstat >> 16 != 0)
1915 {
1916 handle_extended_wait (event_child, *wstat);
1917 continue;
1918 }
1919
1920 if (WIFSTOPPED (*wstat)
1921 && WSTOPSIG (*wstat) == SIGSTOP
1922 && event_child->stop_expected)
1923 {
1924 int should_stop;
1925
1926 if (debug_threads)
1927 debug_printf ("Expected stop.\n");
1928 event_child->stop_expected = 0;
1929
1930 should_stop = (current_inferior->last_resume_kind == resume_stop
1931 || stopping_threads != NOT_STOPPING_THREADS);
1932
1933 if (!should_stop)
1934 {
1935 linux_resume_one_lwp (event_child,
1936 event_child->stepping, 0, NULL);
1937 continue;
1938 }
1939 }
1940
1941 return lwpid_of (event_child);
1942 }
1943
1944 /* NOTREACHED */
1945 return 0;
1946 }
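
/* The "wait for a specific tgid" trick above, reduced to a minimal
   sketch: waitpid cannot wait on a thread-group id directly, so wait
   for any child and filter on the returned LWP's tgid, stashing
   statuses we are not interested in.  lwp_tgid and
   record_pending_status are hypothetical stand-ins for gdbserver's
   LWP-table lookups.  */
#if 0
static int
wait_for_tgid (int tgid, int *wstat)
{
  for (;;)
    {
      int lwpid = waitpid (-1, wstat, __WALL);

      if (lwpid <= 0)
	return -1;
      if (lwp_tgid (lwpid) == tgid)		/* hypothetical */
	return lwpid;
      /* Not ours; leave it pending, as the loop above does.  */
      record_pending_status (lwpid, *wstat);	/* hypothetical */
    }
}
#endif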
1947
1948 /* Count the LWPs that have had events. */
1949
1950 static int
1951 count_events_callback (struct inferior_list_entry *entry, void *data)
1952 {
1953 struct lwp_info *lp = (struct lwp_info *) entry;
1954 struct thread_info *thread = get_lwp_thread (lp);
1955 int *count = data;
1956
1957 gdb_assert (count != NULL);
1958
1959 /* Count only resumed LWPs that have a SIGTRAP event pending that
1960 should be reported to GDB. */
1961 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1962 && thread->last_resume_kind != resume_stop
1963 && lp->status_pending_p
1964 && WIFSTOPPED (lp->status_pending)
1965 && WSTOPSIG (lp->status_pending) == SIGTRAP
1966 && !breakpoint_inserted_here (lp->stop_pc))
1967 (*count)++;
1968
1969 return 0;
1970 }
1971
1972 /* Select the LWP (if any) that is currently being single-stepped. */
1973
1974 static int
1975 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1976 {
1977 struct lwp_info *lp = (struct lwp_info *) entry;
1978 struct thread_info *thread = get_lwp_thread (lp);
1979
1980 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1981 && thread->last_resume_kind == resume_step
1982 && lp->status_pending_p)
1983 return 1;
1984 else
1985 return 0;
1986 }
1987
1988 /* Select the Nth LWP that has had a SIGTRAP event that should be
1989 reported to GDB. */
1990
1991 static int
1992 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1993 {
1994 struct lwp_info *lp = (struct lwp_info *) entry;
1995 struct thread_info *thread = get_lwp_thread (lp);
1996 int *selector = data;
1997
1998 gdb_assert (selector != NULL);
1999
2000 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2001 if (thread->last_resume_kind != resume_stop
2002 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2003 && lp->status_pending_p
2004 && WIFSTOPPED (lp->status_pending)
2005 && WSTOPSIG (lp->status_pending) == SIGTRAP
2006 && !breakpoint_inserted_here (lp->stop_pc))
2007 if ((*selector)-- == 0)
2008 return 1;
2009
2010 return 0;
2011 }
2012
2013 static int
2014 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2015 {
2016 struct lwp_info *lp = (struct lwp_info *) entry;
2017 struct thread_info *thread = get_lwp_thread (lp);
2018 struct lwp_info *event_lp = data;
2019
2020 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2021 if (lp == event_lp)
2022 return 0;
2023
2024 /* If an LWP other than the LWP that we're reporting an event for has
2025 hit a GDB breakpoint (as opposed to some random trap signal),
2026 then just arrange for it to hit it again later. We don't keep
2027 the SIGTRAP status and don't forward the SIGTRAP signal to the
2028 LWP. We will handle the current event; eventually we will resume
2029 all LWPs, and this one will get its breakpoint trap again.
2030
2031 If we do not do this, then we run the risk that the user will
2032 delete or disable the breakpoint, but the LWP will have already
2033 tripped on it. */
2034
2035 if (thread->last_resume_kind != resume_stop
2036 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2037 && lp->status_pending_p
2038 && WIFSTOPPED (lp->status_pending)
2039 && WSTOPSIG (lp->status_pending) == SIGTRAP
2040 && !lp->stepping
2041 && !lp->stopped_by_watchpoint
2042 && cancel_breakpoint (lp))
2043 /* Throw away the SIGTRAP. */
2044 lp->status_pending_p = 0;
2045
2046 return 0;
2047 }
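
/* Roughly what cancel_breakpoint (called above) amounts to: if the
   LWP trapped on an inserted breakpoint, rewind the PC back to the
   breakpoint address and let the caller discard the SIGTRAP; the LWP
   will simply re-trip the breakpoint when resumed.  A sketch only;
   the real routine does more bookkeeping.  */
#if 0
static int
cancel_breakpoint_sketch (struct lwp_info *lwp)
{
  if (breakpoint_inserted_here (lwp->stop_pc))
    {
      struct regcache *regcache
	= get_thread_regcache (get_lwp_thread (lwp), 1);

      (*the_low_target.set_pc) (regcache, lwp->stop_pc);
      return 1;	/* SIGTRAP explained.  */
    }
  return 0;
}
#endif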
2048
2049 static void
2050 linux_cancel_breakpoints (void)
2051 {
2052 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2053 }
2054
2055 /* Select one LWP out of those that have events pending. */
2056
2057 static void
2058 select_event_lwp (struct lwp_info **orig_lp)
2059 {
2060 int num_events = 0;
2061 int random_selector;
2062 struct lwp_info *event_lp;
2063
2064 /* Give preference to any LWP that is being single-stepped. */
2065 event_lp
2066 = (struct lwp_info *) find_inferior (&all_lwps,
2067 select_singlestep_lwp_callback, NULL);
2068 if (event_lp != NULL)
2069 {
2070 if (debug_threads)
2071 debug_printf ("SEL: Select single-step %s\n",
2072 target_pid_to_str (ptid_of (event_lp)));
2073 }
2074 else
2075 {
2076 /* No single-stepping LWP. Select one at random, out of those
2077 which have had SIGTRAP events. */
2078
2079 /* First see how many SIGTRAP events we have. */
2080 find_inferior (&all_lwps, count_events_callback, &num_events);
2081
2082 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2083 random_selector = (int)
2084 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2085
2086 if (debug_threads && num_events > 1)
2087 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2088 num_events, random_selector);
2089
2090 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2091 select_event_lwp_callback,
2092 &random_selector);
2093 }
2094
2095 if (event_lp != NULL)
2096 {
2097 /* Switch the event LWP. */
2098 *orig_lp = event_lp;
2099 }
2100 }
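
/* The random_selector computation above maps rand () uniformly onto
   0 .. num_events - 1: rand () / (RAND_MAX + 1.0) lies in [0, 1), so
   the scaled product lies in [0, num_events) and truncates to a valid
   index.  As a standalone sketch:  */
#if 0
static int
pick_uniform (int n)
{
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif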
2101
2102 /* Decrement the suspend count of an LWP. */
2103
2104 static int
2105 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2106 {
2107 struct lwp_info *lwp = (struct lwp_info *) entry;
2108
2109 /* Ignore EXCEPT. */
2110 if (lwp == except)
2111 return 0;
2112
2113 lwp->suspended--;
2114
2115 gdb_assert (lwp->suspended >= 0);
2116 return 0;
2117 }
2118
2119 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2120 non-NULL. */
2121
2122 static void
2123 unsuspend_all_lwps (struct lwp_info *except)
2124 {
2125 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2126 }
2127
2128 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2129 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2130 void *data);
2131 static int lwp_running (struct inferior_list_entry *entry, void *data);
2132 static ptid_t linux_wait_1 (ptid_t ptid,
2133 struct target_waitstatus *ourstatus,
2134 int target_options);
2135
2136 /* Stabilize threads (move out of jump pads).
2137
2138 If a thread is midway collecting a fast tracepoint, we need to
2139 finish the collection and move it out of the jump pad before
2140 reporting the signal.
2141
2142 This avoids recursion while collecting (when a signal arrives
2143 midway, and the signal handler itself collects), which would trash
2144 the trace buffer. In case the user set a breakpoint in a signal
2145 handler, this avoids the backtrace showing the jump pad, etc..
2146 Most importantly, there are certain things we can't do safely if
2147 threads are stopped in a jump pad (or in its callee's). For
2148 example:
2149
2150 - starting a new trace run. A thread still collecting the
2151 previous run could trash the trace buffer when resumed. The trace
2152 buffer control structures would have been reset but the thread had
2153 no way to tell. The thread could even be midway through memcpy'ing
2154 to the buffer, which would mean that when resumed, it would clobber
2155 the trace buffer that had been set up for a new run.
2156
2157 - we can't rewrite/reuse the jump pads for new tracepoints
2158 safely. Say you do tstart while a thread is stopped midway while
2159 collecting. When the thread is later resumed, it finishes the
2160 collection, and returns to the jump pad, to execute the original
2161 instruction that was under the tracepoint jump at the time the
2162 older run had been started. If the jump pad had since been rewritten
2163 for something else in the new run, the thread would now
2164 execute wrong or random instructions. */
2165
2166 static void
2167 linux_stabilize_threads (void)
2168 {
2169 struct thread_info *save_inferior;
2170 struct lwp_info *lwp_stuck;
2171
2172 lwp_stuck
2173 = (struct lwp_info *) find_inferior (&all_lwps,
2174 stuck_in_jump_pad_callback, NULL);
2175 if (lwp_stuck != NULL)
2176 {
2177 if (debug_threads)
2178 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2179 lwpid_of (lwp_stuck));
2180 return;
2181 }
2182
2183 save_inferior = current_inferior;
2184
2185 stabilizing_threads = 1;
2186
2187 /* Kick 'em all. */
2188 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2189
2190 /* Loop until all are stopped out of the jump pads. */
2191 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2192 {
2193 struct target_waitstatus ourstatus;
2194 struct lwp_info *lwp;
2195 int wstat;
2196
2197 /* Note that we go through the full wait event loop. While
2198 moving threads out of the jump pad, we need to be able to step
2199 over internal breakpoints and such. */
2200 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2201
2202 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2203 {
2204 lwp = get_thread_lwp (current_inferior);
2205
2206 /* Lock it. */
2207 lwp->suspended++;
2208
2209 if (ourstatus.value.sig != GDB_SIGNAL_0
2210 || current_inferior->last_resume_kind == resume_stop)
2211 {
2212 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2213 enqueue_one_deferred_signal (lwp, &wstat);
2214 }
2215 }
2216 }
2217
2218 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2219
2220 stabilizing_threads = 0;
2221
2222 current_inferior = save_inferior;
2223
2224 if (debug_threads)
2225 {
2226 lwp_stuck
2227 = (struct lwp_info *) find_inferior (&all_lwps,
2228 stuck_in_jump_pad_callback, NULL);
2229 if (lwp_stuck != NULL)
2230 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2231 lwpid_of (lwp_stuck));
2232 }
2233 }
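
/* W_STOPCODE, used above to synthesize a wait status for a deferred
   signal, builds the same encoding waitpid reports for a stopped
   child: (sig << 8) | 0x7f.  A quick sanity check of the round trip,
   using the standard status macros:  */
#if 0
static int
w_stopcode_roundtrips (int sig)
{
  int w = W_STOPCODE (sig);

  return WIFSTOPPED (w) && WSTOPSIG (w) == sig;
}
#endif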
2234
2235 /* Wait for process, returns status. */
2236
2237 static ptid_t
2238 linux_wait_1 (ptid_t ptid,
2239 struct target_waitstatus *ourstatus, int target_options)
2240 {
2241 int w;
2242 struct lwp_info *event_child;
2243 int options;
2244 int pid;
2245 int step_over_finished;
2246 int bp_explains_trap;
2247 int maybe_internal_trap;
2248 int report_to_gdb;
2249 int trace_event;
2250 int in_step_range;
2251
2252 if (debug_threads)
2253 {
2254 debug_enter ();
2255 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2256 }
2257
2258 /* Translate generic target options into linux options. */
2259 options = __WALL;
2260 if (target_options & TARGET_WNOHANG)
2261 options |= WNOHANG;
2262
2263 retry:
2264 bp_explains_trap = 0;
2265 trace_event = 0;
2266 in_step_range = 0;
2267 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2268
2269 /* If we were only supposed to resume one thread, only wait for
2270 that thread - if it's still alive. If it died, however - which
2271 can happen if we're coming from the thread death case below -
2272 then we need to make sure we restart the other threads. We could
2273 pick a thread at random or restart all; restarting all is less
2274 arbitrary. */
2275 if (!non_stop
2276 && !ptid_equal (cont_thread, null_ptid)
2277 && !ptid_equal (cont_thread, minus_one_ptid))
2278 {
2279 struct thread_info *thread;
2280
2281 thread = (struct thread_info *) find_inferior_id (&all_threads,
2282 cont_thread);
2283
2284 /* No stepping, no signal - unless one is pending already, of course. */
2285 if (thread == NULL)
2286 {
2287 struct thread_resume resume_info;
2288 resume_info.thread = minus_one_ptid;
2289 resume_info.kind = resume_continue;
2290 resume_info.sig = 0;
2291 linux_resume (&resume_info, 1);
2292 }
2293 else
2294 ptid = cont_thread;
2295 }
2296
2297 if (ptid_equal (step_over_bkpt, null_ptid))
2298 pid = linux_wait_for_event (ptid, &w, options);
2299 else
2300 {
2301 if (debug_threads)
2302 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2303 target_pid_to_str (step_over_bkpt));
2304 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2305 }
2306
2307 if (pid == 0) /* only if TARGET_WNOHANG */
2308 {
2309 if (debug_threads)
2310 {
2311 debug_printf ("linux_wait_1 ret = null_ptid\n");
2312 debug_exit ();
2313 }
2314 return null_ptid;
2315 }
2316
2317 event_child = get_thread_lwp (current_inferior);
2318
2319 /* If we are waiting for a particular child, and it exited,
2320 linux_wait_for_event will return its exit status. Similarly if
2321 the last child exited. If this is not the last child, however,
2322 do not report it as exited until there is a 'thread exited' response
2323 available in the remote protocol. Instead, just wait for another event.
2324 This should be safe, because if the thread crashed we will already
2325 have reported the termination signal to GDB; that should stop any
2326 in-progress stepping operations, etc.
2327
2328 Report the exit status of the last thread to exit. This matches
2329 LinuxThreads' behavior. */
2330
2331 if (last_thread_of_process_p (current_inferior))
2332 {
2333 if (WIFEXITED (w) || WIFSIGNALED (w))
2334 {
2335 if (WIFEXITED (w))
2336 {
2337 ourstatus->kind = TARGET_WAITKIND_EXITED;
2338 ourstatus->value.integer = WEXITSTATUS (w);
2339
2340 if (debug_threads)
2341 {
2342 debug_printf ("linux_wait_1 ret = %s, exited with "
2343 "retcode %d\n",
2344 target_pid_to_str (ptid_of (event_child)),
2345 WEXITSTATUS (w));
2346 debug_exit ();
2347 }
2348 }
2349 else
2350 {
2351 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2352 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2353
2354 if (debug_threads)
2355 {
2356 debug_printf ("linux_wait_1 ret = %s, terminated with "
2357 "signal %d\n",
2358 target_pid_to_str (ptid_of (event_child)),
2359 WTERMSIG (w));
2360 debug_exit ();
2361 }
2362 }
2363
2364 return ptid_of (event_child);
2365 }
2366 }
2367 else
2368 {
2369 if (!WIFSTOPPED (w))
2370 goto retry;
2371 }
2372
2373 /* If this event was not handled before, and is not a SIGTRAP, we
2374 report it. SIGILL and SIGSEGV are also treated as traps in case
2375 a breakpoint is inserted at the current PC. If this target does
2376 not support internal breakpoints at all, we also report the
2377 SIGTRAP without further processing; it's of no concern to us. */
2378 maybe_internal_trap
2379 = (supports_breakpoints ()
2380 && (WSTOPSIG (w) == SIGTRAP
2381 || ((WSTOPSIG (w) == SIGILL
2382 || WSTOPSIG (w) == SIGSEGV)
2383 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2384
2385 if (maybe_internal_trap)
2386 {
2387 /* Handle anything that requires bookkeeping before deciding to
2388 report the event or continue waiting. */
2389
2390 /* First check if we can explain the SIGTRAP with an internal
2391 breakpoint, or if we should possibly report the event to GDB.
2392 Do this before anything that may remove or insert a
2393 breakpoint. */
2394 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2395
2396 /* We have a SIGTRAP, possibly a step-over dance has just
2397 finished. If so, tweak the state machine accordingly,
2398 reinsert breakpoints and delete any reinsert (software
2399 single-step) breakpoints. */
2400 step_over_finished = finish_step_over (event_child);
2401
2402 /* Now invoke the callbacks of any internal breakpoints there. */
2403 check_breakpoints (event_child->stop_pc);
2404
2405 /* Handle tracepoint data collecting. This may overflow the
2406 trace buffer, and cause a tracing stop, removing
2407 breakpoints. */
2408 trace_event = handle_tracepoints (event_child);
2409
2410 if (bp_explains_trap)
2411 {
2412 /* If we stepped or ran into an internal breakpoint, we've
2413 already handled it. So next time we resume (from this
2414 PC), we should step over it. */
2415 if (debug_threads)
2416 debug_printf ("Hit a gdbserver breakpoint.\n");
2417
2418 if (breakpoint_here (event_child->stop_pc))
2419 event_child->need_step_over = 1;
2420 }
2421 }
2422 else
2423 {
2424 /* We have some other signal, possibly a step-over dance was in
2425 progress, and it should be cancelled too. */
2426 step_over_finished = finish_step_over (event_child);
2427 }
2428
2429 /* We have all the data we need. Either report the event to GDB, or
2430 resume threads and keep waiting for more. */
2431
2432 /* If we're collecting a fast tracepoint, finish the collection and
2433 move out of the jump pad before delivering a signal. See
2434 linux_stabilize_threads. */
2435
2436 if (WIFSTOPPED (w)
2437 && WSTOPSIG (w) != SIGTRAP
2438 && supports_fast_tracepoints ()
2439 && agent_loaded_p ())
2440 {
2441 if (debug_threads)
2442 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2443 "to defer or adjust it.\n",
2444 WSTOPSIG (w), lwpid_of (event_child));
2445
2446 /* Allow debugging the jump pad itself. */
2447 if (current_inferior->last_resume_kind != resume_step
2448 && maybe_move_out_of_jump_pad (event_child, &w))
2449 {
2450 enqueue_one_deferred_signal (event_child, &w);
2451
2452 if (debug_threads)
2453 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2454 WSTOPSIG (w), lwpid_of (event_child));
2455
2456 linux_resume_one_lwp (event_child, 0, 0, NULL);
2457 goto retry;
2458 }
2459 }
2460
2461 if (event_child->collecting_fast_tracepoint)
2462 {
2463 if (debug_threads)
2464 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2465 "Check if we're already there.\n",
2466 lwpid_of (event_child),
2467 event_child->collecting_fast_tracepoint);
2468
2469 trace_event = 1;
2470
2471 event_child->collecting_fast_tracepoint
2472 = linux_fast_tracepoint_collecting (event_child, NULL);
2473
2474 if (event_child->collecting_fast_tracepoint != 1)
2475 {
2476 /* No longer need this breakpoint. */
2477 if (event_child->exit_jump_pad_bkpt != NULL)
2478 {
2479 if (debug_threads)
2480 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2481 "stopping all threads momentarily.\n");
2482
2483 /* Other running threads could hit this breakpoint.
2484 We don't handle moribund locations like GDB does,
2485 instead we always pause all threads when removing
2486 breakpoints, so that any step-over or
2487 decr_pc_after_break adjustment is always taken
2488 care of while the breakpoint is still
2489 inserted. */
2490 stop_all_lwps (1, event_child);
2491 cancel_breakpoints ();
2492
2493 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2494 event_child->exit_jump_pad_bkpt = NULL;
2495
2496 unstop_all_lwps (1, event_child);
2497
2498 gdb_assert (event_child->suspended >= 0);
2499 }
2500 }
2501
2502 if (event_child->collecting_fast_tracepoint == 0)
2503 {
2504 if (debug_threads)
2505 debug_printf ("fast tracepoint finished "
2506 "collecting successfully.\n");
2507
2508 /* We may have a deferred signal to report. */
2509 if (dequeue_one_deferred_signal (event_child, &w))
2510 {
2511 if (debug_threads)
2512 debug_printf ("dequeued one signal.\n");
2513 }
2514 else
2515 {
2516 if (debug_threads)
2517 debug_printf ("no deferred signals.\n");
2518
2519 if (stabilizing_threads)
2520 {
2521 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2522 ourstatus->value.sig = GDB_SIGNAL_0;
2523
2524 if (debug_threads)
2525 {
2526 debug_printf ("linux_wait_1 ret = %s, stopped "
2527 "while stabilizing threads\n",
2528 target_pid_to_str (ptid_of (event_child)));
2529 debug_exit ();
2530 }
2531
2532 return ptid_of (event_child);
2533 }
2534 }
2535 }
2536 }
2537
2538 /* Check whether GDB would be interested in this event. */
2539
2540 /* If GDB is not interested in this signal, don't stop other
2541 threads, and don't report it to GDB. Just resume the inferior
2542 right away. We do this for threading-related signals as well as
2543 any that GDB specifically requested we ignore. But never ignore
2544 SIGSTOP if we sent it ourselves, and do not ignore signals when
2545 stepping - they may require special handling to skip the signal
2546 handler. */
2547 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2548 thread library? */
2549 if (WIFSTOPPED (w)
2550 && current_inferior->last_resume_kind != resume_step
2551 && (
2552 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2553 (current_process ()->private->thread_db != NULL
2554 && (WSTOPSIG (w) == __SIGRTMIN
2555 || WSTOPSIG (w) == __SIGRTMIN + 1))
2556 ||
2557 #endif
2558 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2559 && !(WSTOPSIG (w) == SIGSTOP
2560 && current_inferior->last_resume_kind == resume_stop))))
2561 {
2562 siginfo_t info, *info_p;
2563
2564 if (debug_threads)
2565 debug_printf ("Ignored signal %d for LWP %ld.\n",
2566 WSTOPSIG (w), lwpid_of (event_child));
2567
2568 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2569 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2570 info_p = &info;
2571 else
2572 info_p = NULL;
2573 linux_resume_one_lwp (event_child, event_child->stepping,
2574 WSTOPSIG (w), info_p);
2575 goto retry;
2576 }
2577
2578 /* Note that all addresses are always "out of the step range" when
2579 there's no range to begin with. */
2580 in_step_range = lwp_in_step_range (event_child);
2581
2582 /* If GDB wanted this thread to single step, and the thread is out
2583 of the step range, we always want to report the SIGTRAP, and let
2584 GDB handle it. Watchpoints should always be reported. So should
2585 signals we can't explain. A SIGTRAP we can't explain could be a
2586 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2587 do, we'll be able to handle GDB breakpoints on top of internal
2588 breakpoints, by handling the internal breakpoint and still
2589 reporting the event to GDB. If we don't, we're out of luck, GDB
2590 won't see the breakpoint hit. */
2591 report_to_gdb = (!maybe_internal_trap
2592 || (current_inferior->last_resume_kind == resume_step
2593 && !in_step_range)
2594 || event_child->stopped_by_watchpoint
2595 || (!step_over_finished && !in_step_range
2596 && !bp_explains_trap && !trace_event)
2597 || (gdb_breakpoint_here (event_child->stop_pc)
2598 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2599 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2600
2601 run_breakpoint_commands (event_child->stop_pc);
2602
2603 /* We found no reason GDB would want us to stop. We either hit one
2604 of our own breakpoints, or finished an internal step GDB
2605 shouldn't know about. */
2606 if (!report_to_gdb)
2607 {
2608 if (debug_threads)
2609 {
2610 if (bp_explains_trap)
2611 debug_printf ("Hit a gdbserver breakpoint.\n");
2612 if (step_over_finished)
2613 debug_printf ("Step-over finished.\n");
2614 if (trace_event)
2615 debug_printf ("Tracepoint event.\n");
2616 if (lwp_in_step_range (event_child))
2617 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2618 paddress (event_child->stop_pc),
2619 paddress (event_child->step_range_start),
2620 paddress (event_child->step_range_end));
2621 }
2622
2623 /* We're not reporting this breakpoint to GDB, so apply the
2624 decr_pc_after_break adjustment to the inferior's regcache
2625 ourselves. */
2626
2627 if (the_low_target.set_pc != NULL)
2628 {
2629 struct regcache *regcache
2630 = get_thread_regcache (get_lwp_thread (event_child), 1);
2631 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2632 }
2633
2634 /* We may have finished stepping over a breakpoint. If so,
2635 we've stopped and suspended all LWPs momentarily except the
2636 stepping one. This is where we resume them all again. We're
2637 going to keep waiting, so use proceed, which handles stepping
2638 over the next breakpoint. */
2639 if (debug_threads)
2640 debug_printf ("proceeding all threads.\n");
2641
2642 if (step_over_finished)
2643 unsuspend_all_lwps (event_child);
2644
2645 proceed_all_lwps ();
2646 goto retry;
2647 }
2648
2649 if (debug_threads)
2650 {
2651 if (current_inferior->last_resume_kind == resume_step)
2652 {
2653 if (event_child->step_range_start == event_child->step_range_end)
2654 debug_printf ("GDB wanted to single-step, reporting event.\n");
2655 else if (!lwp_in_step_range (event_child))
2656 debug_printf ("Out of step range, reporting event.\n");
2657 }
2658 if (event_child->stopped_by_watchpoint)
2659 debug_printf ("Stopped by watchpoint.\n");
2660 if (gdb_breakpoint_here (event_child->stop_pc))
2661 debug_printf ("Stopped by GDB breakpoint.\n");
2662 debug_printf ("Hit a non-gdbserver trap event.\n");
2664 }
2665
2666 /* Alright, we're going to report a stop. */
2667
2668 if (!non_stop && !stabilizing_threads)
2669 {
2670 /* In all-stop, stop all threads. */
2671 stop_all_lwps (0, NULL);
2672
2673 /* If we're not waiting for a specific LWP, choose an event LWP
2674 from among those that have had events. Giving equal priority
2675 to all LWPs that have had events helps prevent
2676 starvation. */
2677 if (ptid_equal (ptid, minus_one_ptid))
2678 {
2679 event_child->status_pending_p = 1;
2680 event_child->status_pending = w;
2681
2682 select_event_lwp (&event_child);
2683
2684 event_child->status_pending_p = 0;
2685 w = event_child->status_pending;
2686 }
2687
2688 /* Now that we've selected our final event LWP, cancel any
2689 breakpoints in other LWPs that have hit a GDB breakpoint.
2690 See the comment in cancel_breakpoints_callback to find out
2691 why. */
2692 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2693
2694 /* If we were doing a step-over, all other threads but the stepping one
2695 had been paused in start_step_over, with their suspend counts
2696 incremented. We don't want to do a full unstop/unpause, because we're
2697 in all-stop mode (so we want threads stopped), but we still need to
2698 unsuspend the other threads, to decrement their `suspended' count
2699 back. */
2700 if (step_over_finished)
2701 unsuspend_all_lwps (event_child);
2702
2703 /* Stabilize threads (move out of jump pads). */
2704 stabilize_threads ();
2705 }
2706 else
2707 {
2708 /* If we just finished a step-over, then all threads had been
2709 momentarily paused. In all-stop, that's fine, we want
2710 threads stopped by now anyway. In non-stop, we need to
2711 re-resume threads that GDB wanted to be running. */
2712 if (step_over_finished)
2713 unstop_all_lwps (1, event_child);
2714 }
2715
2716 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2717
2718 if (current_inferior->last_resume_kind == resume_stop
2719 && WSTOPSIG (w) == SIGSTOP)
2720 {
2721 /* GDB requested this thread to stop with vCont;t, and it
2722 stopped cleanly, so report it as SIG0. The use of
2723 SIGSTOP is an implementation detail. */
2724 ourstatus->value.sig = GDB_SIGNAL_0;
2725 }
2726 else if (current_inferior->last_resume_kind == resume_stop
2727 && WSTOPSIG (w) != SIGSTOP)
2728 {
2729 /* GDB requested this thread to stop with vCont;t, but it
2730 stopped for other reasons. */
2731 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2732 }
2733 else
2734 {
2735 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2736 }
2737
2738 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2739
2740 if (debug_threads)
2741 {
2742 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2743 target_pid_to_str (ptid_of (event_child)),
2744 ourstatus->kind, ourstatus->value.sig);
2745 debug_exit ();
2746 }
2747
2748 return ptid_of (event_child);
2749 }
2750
2751 /* Get rid of any pending event in the pipe. */
2752 static void
2753 async_file_flush (void)
2754 {
2755 int ret;
2756 char buf;
2757
2758 do
2759 ret = read (linux_event_pipe[0], &buf, 1);
2760 while (ret >= 0 || (ret == -1 && errno == EINTR));
2761 }
2762
2763 /* Put something in the pipe, so the event loop wakes up. */
2764 static void
2765 async_file_mark (void)
2766 {
2767 int ret;
2768
2769 async_file_flush ();
2770
2771 do
2772 ret = write (linux_event_pipe[1], "+", 1);
2773 while (ret == 0 || (ret == -1 && errno == EINTR));
2774
2775 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2776 be awakened anyway. */
2777 }
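
/* async_file_flush and async_file_mark above implement the classic
   self-pipe trick: the read end is registered with the event loop,
   and writing a byte wakes it.  A sketch of how such a pipe might be
   created, with both ends non-blocking so neither helper can hang
   (error handling elided):  */
#if 0
static int
make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif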
2778
2779 static ptid_t
2780 linux_wait (ptid_t ptid,
2781 struct target_waitstatus *ourstatus, int target_options)
2782 {
2783 ptid_t event_ptid;
2784
2785 /* Flush the async file first. */
2786 if (target_is_async_p ())
2787 async_file_flush ();
2788
2789 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2790
2791 /* If at least one stop was reported, there may be more. A single
2792 SIGCHLD can signal more than one child stop. */
2793 if (target_is_async_p ()
2794 && (target_options & TARGET_WNOHANG) != 0
2795 && !ptid_equal (event_ptid, null_ptid))
2796 async_file_mark ();
2797
2798 return event_ptid;
2799 }
2800
2801 /* Send a signal to an LWP. */
2802
2803 static int
2804 kill_lwp (unsigned long lwpid, int signo)
2805 {
2806 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2807 fails, then we are not using nptl threads and we should be using kill. */
2808
2809 #ifdef __NR_tkill
2810 {
2811 static int tkill_failed;
2812
2813 if (!tkill_failed)
2814 {
2815 int ret;
2816
2817 errno = 0;
2818 ret = syscall (__NR_tkill, lwpid, signo);
2819 if (errno != ENOSYS)
2820 return ret;
2821 tkill_failed = 1;
2822 }
2823 }
2824 #endif
2825
2826 return kill (lwpid, signo);
2827 }
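
/* Where available, tgkill is the race-free successor to tkill: it
   additionally checks the thread-group id, so a recycled LWP id in
   another process cannot be signalled by mistake.  A hypothetical
   variant of the above, assuming __NR_tgkill (Linux 2.5.75+):  */
#if 0
static int
tgkill_lwp (int tgid, int lwpid, int signo)
{
  return syscall (__NR_tgkill, tgid, lwpid, signo);
}
#endif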
2828
2829 void
2830 linux_stop_lwp (struct lwp_info *lwp)
2831 {
2832 send_sigstop (lwp);
2833 }
2834
2835 static void
2836 send_sigstop (struct lwp_info *lwp)
2837 {
2838 int pid;
2839
2840 pid = lwpid_of (lwp);
2841
2842 /* If we already have a pending stop signal for this process, don't
2843 send another. */
2844 if (lwp->stop_expected)
2845 {
2846 if (debug_threads)
2847 debug_printf ("Have pending sigstop for lwp %d\n", pid);
2848
2849 return;
2850 }
2851
2852 if (debug_threads)
2853 debug_printf ("Sending sigstop to lwp %d\n", pid);
2854
2855 lwp->stop_expected = 1;
2856 kill_lwp (pid, SIGSTOP);
2857 }
2858
2859 static int
2860 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2861 {
2862 struct lwp_info *lwp = (struct lwp_info *) entry;
2863
2864 /* Ignore EXCEPT. */
2865 if (lwp == except)
2866 return 0;
2867
2868 if (lwp->stopped)
2869 return 0;
2870
2871 send_sigstop (lwp);
2872 return 0;
2873 }
2874
2875 /* Increment the suspend count of an LWP, and stop it, if not stopped
2876 yet. */
2877 static int
2878 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2879 void *except)
2880 {
2881 struct lwp_info *lwp = (struct lwp_info *) entry;
2882
2883 /* Ignore EXCEPT. */
2884 if (lwp == except)
2885 return 0;
2886
2887 lwp->suspended++;
2888
2889 return send_sigstop_callback (entry, except);
2890 }
2891
2892 static void
2893 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2894 {
2895 /* It's dead, really. */
2896 lwp->dead = 1;
2897
2898 /* Store the exit status for later. */
2899 lwp->status_pending_p = 1;
2900 lwp->status_pending = wstat;
2901
2902 /* Prevent trying to stop it. */
2903 lwp->stopped = 1;
2904
2905 /* No further stops are expected from a dead lwp. */
2906 lwp->stop_expected = 0;
2907 }
2908
2909 static void
2910 wait_for_sigstop (struct inferior_list_entry *entry)
2911 {
2912 struct lwp_info *lwp = (struct lwp_info *) entry;
2913 struct thread_info *saved_inferior;
2914 int wstat;
2915 ptid_t saved_tid;
2916 ptid_t ptid;
2917 int pid;
2918
2919 if (lwp->stopped)
2920 {
2921 if (debug_threads)
2922 debug_printf ("wait_for_sigstop: LWP %ld already stopped\n",
2923 lwpid_of (lwp));
2924 return;
2925 }
2926
2927 saved_inferior = current_inferior;
2928 if (saved_inferior != NULL)
2929 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2930 else
2931 saved_tid = null_ptid; /* avoid bogus unused warning */
2932
2933 ptid = lwp->head.id;
2934
2935 if (debug_threads)
2936 debug_printf ("wait_for_sigstop: pulling one event\n");
2937
2938 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2939
2940 /* If we stopped with a non-SIGSTOP signal, save it for later
2941 and record the pending SIGSTOP. If the process exited, just
2942 return. */
2943 if (WIFSTOPPED (wstat))
2944 {
2945 if (debug_threads)
2946 debug_printf ("LWP %ld stopped with signal %d\n",
2947 lwpid_of (lwp), WSTOPSIG (wstat));
2948
2949 if (WSTOPSIG (wstat) != SIGSTOP)
2950 {
2951 if (debug_threads)
2952 debug_printf ("LWP %ld stopped with non-sigstop status %06x\n",
2953 lwpid_of (lwp), wstat);
2954
2955 lwp->status_pending_p = 1;
2956 lwp->status_pending = wstat;
2957 }
2958 }
2959 else
2960 {
2961 if (debug_threads)
2962 debug_printf ("Process %d exited while stopping LWPs\n", pid);
2963
2964 lwp = find_lwp_pid (pid_to_ptid (pid));
2965 if (lwp)
2966 {
2967 /* Leave this status pending for the next time we're able to
2968 report it. In the mean time, we'll report this lwp as
2969 dead to GDB, so GDB doesn't try to read registers and
2970 memory from it. This can only happen if this was the
2971 last thread of the process; otherwise, PID is removed
2972 from the thread tables before linux_wait_for_event
2973 returns. */
2974 mark_lwp_dead (lwp, wstat);
2975 }
2976 }
2977
2978 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2979 current_inferior = saved_inferior;
2980 else
2981 {
2982 if (debug_threads)
2983 debug_printf ("Previously current thread died.\n");
2984
2985 if (non_stop)
2986 {
2987 /* We can't change the current inferior behind GDB's back,
2988 otherwise, a subsequent command may apply to the wrong
2989 process. */
2990 current_inferior = NULL;
2991 }
2992 else
2993 {
2994 /* Set a valid thread as current. */
2995 set_desired_inferior (0);
2996 }
2997 }
2998 }
2999
3000 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3001 move it out, because we need to report the stop event to GDB. For
3002 example, if the user puts a breakpoint in the jump pad, it's
3003 because she wants to debug it. */
3004
3005 static int
3006 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3007 {
3008 struct lwp_info *lwp = (struct lwp_info *) entry;
3009 struct thread_info *thread = get_lwp_thread (lwp);
3010
3011 gdb_assert (lwp->suspended == 0);
3012 gdb_assert (lwp->stopped);
3013
3014 /* Allow debugging the jump pad, gdb_collect, etc.. */
3015 return (supports_fast_tracepoints ()
3016 && agent_loaded_p ()
3017 && (gdb_breakpoint_here (lwp->stop_pc)
3018 || lwp->stopped_by_watchpoint
3019 || thread->last_resume_kind == resume_step)
3020 && linux_fast_tracepoint_collecting (lwp, NULL));
3021 }
3022
3023 static void
3024 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3025 {
3026 struct lwp_info *lwp = (struct lwp_info *) entry;
3027 struct thread_info *thread = get_lwp_thread (lwp);
3028 int *wstat;
3029
3030 gdb_assert (lwp->suspended == 0);
3031 gdb_assert (lwp->stopped);
3032
3033 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3034
3035 /* Allow debugging the jump pad, gdb_collect, etc. */
3036 if (!gdb_breakpoint_here (lwp->stop_pc)
3037 && !lwp->stopped_by_watchpoint
3038 && thread->last_resume_kind != resume_step
3039 && maybe_move_out_of_jump_pad (lwp, wstat))
3040 {
3041 if (debug_threads)
3042 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3043 lwpid_of (lwp));
3044
3045 if (wstat)
3046 {
3047 lwp->status_pending_p = 0;
3048 enqueue_one_deferred_signal (lwp, wstat);
3049
3050 if (debug_threads)
3051 debug_printf ("Signal %d for LWP %ld deferred "
3052 "(in jump pad)\n",
3053 WSTOPSIG (*wstat), lwpid_of (lwp));
3054 }
3055
3056 linux_resume_one_lwp (lwp, 0, 0, NULL);
3057 }
3058 else
3059 lwp->suspended++;
3060 }
3061
3062 static int
3063 lwp_running (struct inferior_list_entry *entry, void *data)
3064 {
3065 struct lwp_info *lwp = (struct lwp_info *) entry;
3066
3067 if (lwp->dead)
3068 return 0;
3069 if (lwp->stopped)
3070 return 0;
3071 return 1;
3072 }
3073
3074 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3075 If SUSPEND, then also increase the suspend count of every LWP,
3076 except EXCEPT. */
3077
3078 static void
3079 stop_all_lwps (int suspend, struct lwp_info *except)
3080 {
3081 /* Should not be called recursively. */
3082 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3083
3084 if (debug_threads)
3085 {
3086 debug_enter ();
3087 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3088 suspend ? "stop-and-suspend" : "stop",
3089 except != NULL
3090 ? target_pid_to_str (ptid_of (except))
3091 : "none");
3092 }
3093
3094 stopping_threads = (suspend
3095 ? STOPPING_AND_SUSPENDING_THREADS
3096 : STOPPING_THREADS);
3097
3098 if (suspend)
3099 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3100 else
3101 find_inferior (&all_lwps, send_sigstop_callback, except);
3102 for_each_inferior (&all_lwps, wait_for_sigstop);
3103 stopping_threads = NOT_STOPPING_THREADS;
3104
3105 if (debug_threads)
3106 {
3107 debug_printf ("stop_all_lwps done, setting stopping_threads "
3108 "back to !stopping\n");
3109 debug_exit ();
3110 }
3111 }
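
/* The send-then-reap shape of stop_all_lwps, in miniature: SIGSTOP
   every running LWP first, then collect one stop notification per
   LWP.  LWPIDS/N stand in for the all_lwps list, and unlike
   wait_for_sigstop this sketch ignores the possibility of the LWP
   stopping with some other signal first.  */
#if 0
static void
stop_lwps_sketch (const int *lwpids, int n)
{
  int i, wstat;

  for (i = 0; i < n; i++)
    kill_lwp (lwpids[i], SIGSTOP);
  for (i = 0; i < n; i++)
    waitpid (lwpids[i], &wstat, __WALL);
}
#endif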
3112
3113 /* Resume execution of the inferior process.
3114 If STEP is nonzero, single-step it.
3115 If SIGNAL is nonzero, give it that signal. */
3116
3117 static void
3118 linux_resume_one_lwp (struct lwp_info *lwp,
3119 int step, int signal, siginfo_t *info)
3120 {
3121 struct thread_info *saved_inferior;
3122 int fast_tp_collecting;
3123
3124 if (lwp->stopped == 0)
3125 return;
3126
3127 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3128
3129 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3130
3131 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3132 user used the "jump" command, or "set $pc = foo"). */
3133 if (lwp->stop_pc != get_pc (lwp))
3134 {
3135 /* Collecting 'while-stepping' actions doesn't make sense
3136 anymore. */
3137 release_while_stepping_state_list (get_lwp_thread (lwp));
3138 }
3139
3140 /* If we have pending signals or status, and a new signal, enqueue the
3141 signal. Also enqueue the signal if we are waiting to reinsert a
3142 breakpoint; it will be picked up again below. */
3143 if (signal != 0
3144 && (lwp->status_pending_p
3145 || lwp->pending_signals != NULL
3146 || lwp->bp_reinsert != 0
3147 || fast_tp_collecting))
3148 {
3149 struct pending_signals *p_sig;
3150 p_sig = xmalloc (sizeof (*p_sig));
3151 p_sig->prev = lwp->pending_signals;
3152 p_sig->signal = signal;
3153 if (info == NULL)
3154 memset (&p_sig->info, 0, sizeof (siginfo_t));
3155 else
3156 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3157 lwp->pending_signals = p_sig;
3158 }
3159
3160 if (lwp->status_pending_p)
3161 {
3162 if (debug_threads)
3163 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3164 " has pending status\n",
3165 lwpid_of (lwp), step ? "step" : "continue", signal,
3166 lwp->stop_expected ? "expected" : "not expected");
3167 return;
3168 }
3169
3170 saved_inferior = current_inferior;
3171 current_inferior = get_lwp_thread (lwp);
3172
3173 if (debug_threads)
3174 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3175 lwpid_of (lwp), step ? "step" : "continue", signal,
3176 lwp->stop_expected ? "expected" : "not expected");
3177
3178 /* This bit needs some thinking about. If we get a signal that
3179 we must report while a single-step reinsert is still pending,
3180 we often end up resuming the thread. It might be better to
3181 (ew) allow a stack of pending events; then we could be sure that
3182 the reinsert happened right away and not lose any signals.
3183
3184 Making this stack would also shrink the window in which breakpoints are
3185 uninserted (see comment in linux_wait_for_lwp) but not enough for
3186 complete correctness, so it won't solve that problem. It may be
3187 worthwhile just to solve this one, however. */
3188 if (lwp->bp_reinsert != 0)
3189 {
3190 if (debug_threads)
3191 debug_printf (" pending reinsert at 0x%s\n",
3192 paddress (lwp->bp_reinsert));
3193
3194 if (can_hardware_single_step ())
3195 {
3196 if (fast_tp_collecting == 0)
3197 {
3198 if (step == 0)
3199 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3200 if (lwp->suspended)
3201 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3202 lwp->suspended);
3203 }
3204
3205 step = 1;
3206 }
3207
3208 /* Postpone any pending signal. It was enqueued above. */
3209 signal = 0;
3210 }
3211
3212 if (fast_tp_collecting == 1)
3213 {
3214 if (debug_threads)
3215 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3216 " (exit-jump-pad-bkpt)\n",
3217 lwpid_of (lwp));
3218
3219 /* Postpone any pending signal. It was enqueued above. */
3220 signal = 0;
3221 }
3222 else if (fast_tp_collecting == 2)
3223 {
3224 if (debug_threads)
3225 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3226 " single-stepping\n",
3227 lwpid_of (lwp));
3228
3229 if (can_hardware_single_step ())
3230 step = 1;
3231 else
3232 fatal ("moving out of jump pad single-stepping"
3233 " not implemented on this target");
3234
3235 /* Postpone any pending signal. It was enqueued above. */
3236 signal = 0;
3237 }
3238
3239 /* If we have while-stepping actions in this thread, set it stepping.
3240 If we have a signal to deliver, it may or may not be set to
3241 SIG_IGN, we don't know. Assume so, and allow collecting
3242 while-stepping into a signal handler. A possible smart thing to
3243 do would be to set an internal breakpoint at the signal return
3244 address, continue, and carry on catching this while-stepping
3245 action only when that breakpoint is hit. A future
3246 enhancement. */
3247 if (get_lwp_thread (lwp)->while_stepping != NULL
3248 && can_hardware_single_step ())
3249 {
3250 if (debug_threads)
3251 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3252 lwpid_of (lwp));
3253 step = 1;
3254 }
3255
3256 if (debug_threads && the_low_target.get_pc != NULL)
3257 {
3258 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3259 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3260 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3261 }
3262
3263 /* If we have pending signals, consume one unless we are trying to
3264 reinsert a breakpoint or we're trying to finish a fast tracepoint
3265 collect. */
3266 if (lwp->pending_signals != NULL
3267 && lwp->bp_reinsert == 0
3268 && fast_tp_collecting == 0)
3269 {
3270 struct pending_signals **p_sig;
3271
3272 p_sig = &lwp->pending_signals;
3273 while ((*p_sig)->prev != NULL)
3274 p_sig = &(*p_sig)->prev;
3275
3276 signal = (*p_sig)->signal;
3277 if ((*p_sig)->info.si_signo != 0)
3278 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3279 &(*p_sig)->info);
3280
3281 free (*p_sig);
3282 *p_sig = NULL;
3283 }
3284
3285 if (the_low_target.prepare_to_resume != NULL)
3286 the_low_target.prepare_to_resume (lwp);
3287
3288 regcache_invalidate_thread (get_lwp_thread (lwp));
3289 errno = 0;
3290 lwp->stopped = 0;
3291 lwp->stopped_by_watchpoint = 0;
3292 lwp->stepping = step;
3293 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3294 (PTRACE_TYPE_ARG3) 0,
3295 /* Coerce to a uintptr_t first to avoid potential gcc warning
3296 of coercing an 8 byte integer to a 4 byte pointer. */
3297 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3298
3299 current_inferior = saved_inferior;
3300 if (errno)
3301 {
3302 /* ESRCH from ptrace either means that the thread was already
3303 running (an error) or that it is gone (a race condition). If
3304 it's gone, we will get a notification the next time we wait,
3305 so we can ignore the error. We could differentiate these
3306 two, but it's tricky without waiting; the thread still exists
3307 as a zombie, so sending it signal 0 would succeed. So just
3308 ignore ESRCH. */
3309 if (errno == ESRCH)
3310 return;
3311
3312 perror_with_name ("ptrace");
3313 }
3314 }
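
/* The resume primitive at the core of the function above, reduced to
   one call: the request selects stepping vs. continuing, and the
   fourth ptrace argument carries the signal to deliver (0 for
   none).  */
#if 0
static long
resume_lwp_sketch (unsigned long lwpid, int step, int signo)
{
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid,
		 (PTRACE_TYPE_ARG3) 0,
		 (PTRACE_TYPE_ARG4) (uintptr_t) signo);
}
#endif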
3315
3316 struct thread_resume_array
3317 {
3318 struct thread_resume *resume;
3319 size_t n;
3320 };
3321
3322 /* This function is called once per thread via find_inferior.
3323 ARG is a pointer to a thread_resume_array struct.
3324 We look up the thread specified by ENTRY in ARG, and mark the thread
3325 with a pointer to the appropriate resume request.
3326
3327 This algorithm is O(threads * resume elements), but the number of
3328 resume elements is small (and will remain small at least until GDB
3329 supports thread suspension). */
3330
3331 static int
3332 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3333 {
3334 struct lwp_info *lwp;
3335 struct thread_info *thread;
3336 int ndx;
3337 struct thread_resume_array *r;
3338
3339 thread = (struct thread_info *) entry;
3340 lwp = get_thread_lwp (thread);
3341 r = arg;
3342
3343 for (ndx = 0; ndx < r->n; ndx++)
3344 {
3345 ptid_t ptid = r->resume[ndx].thread;
3346 if (ptid_equal (ptid, minus_one_ptid)
3347 || ptid_equal (ptid, entry->id)
3348 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3349 of PID'. */
3350 || (ptid_get_pid (ptid) == pid_of (lwp)
3351 && (ptid_is_pid (ptid)
3352 || ptid_get_lwp (ptid) == -1)))
3353 {
3354 if (r->resume[ndx].kind == resume_stop
3355 && thread->last_resume_kind == resume_stop)
3356 {
3357 if (debug_threads)
3358 debug_printf ("already %s LWP %ld at GDB's request\n",
3359 (thread->last_status.kind
3360 == TARGET_WAITKIND_STOPPED)
3361 ? "stopped"
3362 : "stopping",
3363 lwpid_of (lwp));
3364
3365 continue;
3366 }
3367
3368 lwp->resume = &r->resume[ndx];
3369 thread->last_resume_kind = lwp->resume->kind;
3370
3371 lwp->step_range_start = lwp->resume->step_range_start;
3372 lwp->step_range_end = lwp->resume->step_range_end;
3373
3374 /* If we had a deferred signal to report, dequeue one now.
3375 This can happen if LWP gets more than one signal while
3376 trying to get out of a jump pad. */
3377 if (lwp->stopped
3378 && !lwp->status_pending_p
3379 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3380 {
3381 lwp->status_pending_p = 1;
3382
3383 if (debug_threads)
3384 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3385 "leaving status pending.\n",
3386 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3387 }
3388
3389 return 0;
3390 }
3391 }
3392
3393 /* No resume action for this thread. */
3394 lwp->resume = NULL;
3395
3396 return 0;
3397 }
3398
3399 /* find_inferior callback for linux_resume.
3400 Set *FLAG_P if this lwp has an interesting status pending. */
3401
3402 static int
3403 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3404 {
3405 struct lwp_info *lwp = (struct lwp_info *) entry;
3406
3407 /* LWPs which will not be resumed are not interesting, because
3408 we might not wait for them next time through linux_wait. */
3409 if (lwp->resume == NULL)
3410 return 0;
3411
3412 if (lwp->status_pending_p)
3413 * (int *) flag_p = 1;
3414
3415 return 0;
3416 }
3417
3418 /* Return 1 if this lwp that GDB wants running is stopped at an
3419 internal breakpoint that we need to step over. It assumes that any
3420 required STOP_PC adjustment has already been propagated to the
3421 inferior's regcache. */
3422
3423 static int
3424 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3425 {
3426 struct lwp_info *lwp = (struct lwp_info *) entry;
3427 struct thread_info *thread;
3428 struct thread_info *saved_inferior;
3429 CORE_ADDR pc;
3430
3431 /* LWPs which will not be resumed are not interesting, because we
3432 might not wait for them next time through linux_wait. */
3433
3434 if (!lwp->stopped)
3435 {
3436 if (debug_threads)
3437 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3438 lwpid_of (lwp));
3439 return 0;
3440 }
3441
3442 thread = get_lwp_thread (lwp);
3443
3444 if (thread->last_resume_kind == resume_stop)
3445 {
3446 if (debug_threads)
3447 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3448 " stopped\n",
3449 lwpid_of (lwp));
3450 return 0;
3451 }
3452
3453 gdb_assert (lwp->suspended >= 0);
3454
3455 if (lwp->suspended)
3456 {
3457 if (debug_threads)
3458 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3459 lwpid_of (lwp));
3460 return 0;
3461 }
3462
3463 if (!lwp->need_step_over)
3464 {
3465 if (debug_threads)
3466 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3467 }
3468
3469 if (lwp->status_pending_p)
3470 {
3471 if (debug_threads)
3472 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3473 " status.\n",
3474 lwpid_of (lwp));
3475 return 0;
3476 }
3477
3478 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3479 or we have. */
3480 pc = get_pc (lwp);
3481
3482 /* If the PC has changed since we stopped, then don't do anything,
3483 and let the breakpoint/tracepoint be hit. This happens if, for
3484 instance, GDB handled the decr_pc_after_break subtraction itself,
3485 GDB is OOL stepping this thread, or the user has issued a "jump"
3486 command, or poked the thread's registers herself. */
3487 if (pc != lwp->stop_pc)
3488 {
3489 if (debug_threads)
3490 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3491 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3492 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3493
3494 lwp->need_step_over = 0;
3495 return 0;
3496 }
3497
3498 saved_inferior = current_inferior;
3499 current_inferior = thread;
3500
3501 /* We can only step over breakpoints we know about. */
3502 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3503 {
3504 /* Don't step over a breakpoint that GDB expects to hit
3505 though. If the condition is being evaluated on the target's side
3506 and it evaluates to false, step over this breakpoint as well. */
3507 if (gdb_breakpoint_here (pc)
3508 && gdb_condition_true_at_breakpoint (pc)
3509 && gdb_no_commands_at_breakpoint (pc))
3510 {
3511 if (debug_threads)
3512 debug_printf ("Need step over [LWP %ld]? yes, but found"
3513 " GDB breakpoint at 0x%s; skipping step over\n",
3514 lwpid_of (lwp), paddress (pc));
3515
3516 current_inferior = saved_inferior;
3517 return 0;
3518 }
3519 else
3520 {
3521 if (debug_threads)
3522 debug_printf ("Need step over [LWP %ld]? yes, "
3523 "found breakpoint at 0x%s\n",
3524 lwpid_of (lwp), paddress (pc));
3525
3526 /* We've found an lwp that needs stepping over --- return 1 so
3527 that find_inferior stops looking. */
3528 current_inferior = saved_inferior;
3529
3530 /* If the step over is cancelled, this is set again. */
3531 lwp->need_step_over = 0;
3532 return 1;
3533 }
3534 }
3535
3536 current_inferior = saved_inferior;
3537
3538 if (debug_threads)
3539 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3540 " at 0x%s\n",
3541 lwpid_of (lwp), paddress (pc));
3542
3543 return 0;
3544 }
3545
3546 /* Start a step-over operation on LWP. When LWP is stopped at a
3547 breakpoint, to make progress, we need to move the breakpoint out
3548 of the way. If we let other threads run while we do that, they may
3549 pass by the breakpoint location and miss hitting it. To avoid
3550 that, a step-over momentarily stops all threads while LWP is
3551 single-stepped with the breakpoint temporarily uninserted from
3552 the inferior. When the single-step finishes, we reinsert the
3553 breakpoint, and let all threads that are supposed to be running,
3554 run again.
3555
3556 On targets that don't support hardware single-step, we don't
3557 currently support full software single-stepping. Instead, we only
3558 support stepping over the thread event breakpoint, by asking the
3559 low target where to place a reinsert breakpoint. Since this
3560 routine assumes the breakpoint being stepped over is a thread event
3561 breakpoint, it usually assumes the return address of the current
3562 function is a good enough place to set the reinsert breakpoint. */
3563
3564 static int
3565 start_step_over (struct lwp_info *lwp)
3566 {
3567 struct thread_info *saved_inferior;
3568 CORE_ADDR pc;
3569 int step;
3570
3571 if (debug_threads)
3572 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3573 lwpid_of (lwp));
3574
3575 stop_all_lwps (1, lwp);
3576 gdb_assert (lwp->suspended == 0);
3577
3578 if (debug_threads)
3579 debug_printf ("Done stopping all threads for step-over.\n");
3580
3581 /* Note, we should always reach here with an already adjusted PC,
3582 either by GDB (if we're resuming due to GDB's request), or by our
3583 caller, if we just finished handling an internal breakpoint GDB
3584 shouldn't care about. */
3585 pc = get_pc (lwp);
3586
3587 saved_inferior = current_inferior;
3588 current_inferior = get_lwp_thread (lwp);
3589
3590 lwp->bp_reinsert = pc;
3591 uninsert_breakpoints_at (pc);
3592 uninsert_fast_tracepoint_jumps_at (pc);
3593
3594 if (can_hardware_single_step ())
3595 {
3596 step = 1;
3597 }
3598 else
3599 {
3600 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3601 set_reinsert_breakpoint (raddr);
3602 step = 0;
3603 }
3604
3605 current_inferior = saved_inferior;
3606
3607 linux_resume_one_lwp (lwp, step, 0, NULL);
3608
3609 /* Require next event from this LWP. */
3610 step_over_bkpt = lwp->head.id;
3611 return 1;
3612 }
3613
3614 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3615 start_step_over, if still there, and delete any reinsert
3616 breakpoints we've set, on non hardware single-step targets. */
3617
3618 static int
3619 finish_step_over (struct lwp_info *lwp)
3620 {
3621 if (lwp->bp_reinsert != 0)
3622 {
3623 if (debug_threads)
3624 debug_printf ("Finished step over.\n");
3625
3626 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3627 may be no breakpoint to reinsert there by now. */
3628 reinsert_breakpoints_at (lwp->bp_reinsert);
3629 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3630
3631 lwp->bp_reinsert = 0;
3632
3633 /* Delete any software-single-step reinsert breakpoints. No
3634 longer needed. We don't have to worry about other threads
3635 hitting this trap, and later not being able to explain it,
3636 because we were stepping over a breakpoint, and we hold all
3637 threads but LWP stopped while doing that. */
3638 if (!can_hardware_single_step ())
3639 delete_reinsert_breakpoints ();
3640
3641 step_over_bkpt = null_ptid;
3642 return 1;
3643 }
3644 else
3645 return 0;
3646 }
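
/* start_step_over/finish_step_over collapsed into one synchronous
   sketch for a software-breakpoint target.  An x86-style single trap
   byte (0xcc) is assumed here; the real flow is asynchronous and runs
   through linux_resume_one_lwp and linux_wait_1.  */
#if 0
static void
step_over_sw_breakpoint (int lwpid, long addr, long saved_word)
{
  long trapped_word;
  int wstat;

  /* Uninsert: restore the original text word.  */
  ptrace (PTRACE_POKETEXT, lwpid, (void *) addr, (void *) saved_word);
  /* Single-step this one LWP past it, all others held stopped.  */
  ptrace (PTRACE_SINGLESTEP, lwpid, (void *) 0, (void *) 0);
  waitpid (lwpid, &wstat, __WALL);
  /* Reinsert the breakpoint byte.  */
  trapped_word = (saved_word & ~0xffL) | 0xcc;
  ptrace (PTRACE_POKETEXT, lwpid, (void *) addr, (void *) trapped_word);
}
#endif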
3647
3648 /* This function is called once per thread. We check the thread's resume
3649 request, which will tell us whether to resume, step, or leave the thread
3650 stopped; and what signal, if any, it should be sent.
3651
3652 For threads which we aren't explicitly told otherwise, we preserve
3653 the stepping flag; this is used for stepping over gdbserver-placed
3654 breakpoints.
3655
3656 If pending_flags was set in any thread, we queue any needed
3657 signals, since we won't actually resume. We already have a pending
3658 event to report, so we don't need to preserve any step requests;
3659 they should be re-issued if necessary. */
3660
3661 static int
3662 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3663 {
3664 struct lwp_info *lwp;
3665 struct thread_info *thread;
3666 int step;
3667 int leave_all_stopped = * (int *) arg;
3668 int leave_pending;
3669
3670 thread = (struct thread_info *) entry;
3671 lwp = get_thread_lwp (thread);
3672
3673 if (lwp->resume == NULL)
3674 return 0;
3675
3676 if (lwp->resume->kind == resume_stop)
3677 {
3678 if (debug_threads)
3679 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (lwp));
3680
3681 if (!lwp->stopped)
3682 {
3683 if (debug_threads)
3684 debug_printf ("stopping LWP %ld\n", lwpid_of (lwp));
3685
3686 /* Stop the thread, and wait for the event asynchronously,
3687 through the event loop. */
3688 send_sigstop (lwp);
3689 }
3690 else
3691 {
3692 if (debug_threads)
3693 debug_printf ("already stopped LWP %ld\n",
3694 lwpid_of (lwp));
3695
3696 /* The LWP may have been stopped in an internal event that
3697 was not meant to be notified back to GDB (e.g., gdbserver
3698 breakpoint), so we should be reporting a stop event in
3699 this case too. */
3700
3701 /* If the thread already has a pending SIGSTOP, this is a
3702 no-op. Otherwise, something later will presumably resume
3703 the thread and this will cause it to cancel any pending
3704 operation, due to last_resume_kind == resume_stop. If
3705 the thread already has a pending status to report, we
3706 will still report it the next time we wait - see
3707 status_pending_p_callback. */
3708
3709 /* If we already have a pending signal to report, then
3710 there's no need to queue a SIGSTOP, as this means we're
3711 midway through moving the LWP out of the jumppad, and we
3712 will report the pending signal as soon as that is
3713 finished. */
3714 if (lwp->pending_signals_to_report == NULL)
3715 send_sigstop (lwp);
3716 }
3717
3718 /* For stop requests, we're done. */
3719 lwp->resume = NULL;
3720 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3721 return 0;
3722 }
3723
3724 /* If this thread, which is about to be resumed, has a pending status,
3725 then don't resume any threads - we can just report the pending
3726 status. Make sure to queue any signals that would otherwise be
3727 sent. In all-stop mode, we make this decision based on whether *any*
3728 thread has a pending status. If there's a thread that needs the
3729 step-over-breakpoint dance, then don't resume any other thread
3730 but that particular one. */
3731 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3732
3733 if (!leave_pending)
3734 {
3735 if (debug_threads)
3736 debug_printf ("resuming LWP %ld\n", lwpid_of (lwp));
3737
3738 step = (lwp->resume->kind == resume_step);
3739 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3740 }
3741 else
3742 {
3743 if (debug_threads)
3744 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (lwp));
3745
3746 /* If we have a new signal, enqueue the signal. */
3747 if (lwp->resume->sig != 0)
3748 {
3749 struct pending_signals *p_sig;
3750 p_sig = xmalloc (sizeof (*p_sig));
3751 p_sig->prev = lwp->pending_signals;
3752 p_sig->signal = lwp->resume->sig;
3753 memset (&p_sig->info, 0, sizeof (siginfo_t));
3754
3755 /* If this is the same signal we were previously stopped by,
3756 make sure to queue its siginfo. We can ignore the return
3757 value of ptrace; if it fails, we'll skip
3758 PTRACE_SETSIGINFO. */
3759 if (WIFSTOPPED (lwp->last_status)
3760 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3761 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3762 &p_sig->info);
3763
3764 lwp->pending_signals = p_sig;
3765 }
3766 }
3767
3768 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3769 lwp->resume = NULL;
3770 return 0;
3771 }
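
/* Pending signals are kept on a singly linked list whose head is the
   most recently queued entry; each node's prev link points at the
   entry queued before it.  The sketch below restates the push done
   above as a stand-alone helper; the function name is illustrative
   and not part of the original interface.  */

static void
example_push_pending_signal (struct lwp_info *lwp, int sig)
{
  struct pending_signals *p_sig = xmalloc (sizeof (*p_sig));

  /* Link the new entry in front of the current head.  */
  p_sig->prev = lwp->pending_signals;
  p_sig->signal = sig;
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  lwp->pending_signals = p_sig;
}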
3772
3773 static void
3774 linux_resume (struct thread_resume *resume_info, size_t n)
3775 {
3776 struct thread_resume_array array = { resume_info, n };
3777 struct lwp_info *need_step_over = NULL;
3778 int any_pending;
3779 int leave_all_stopped;
3780
3781 if (debug_threads)
3782 {
3783 debug_enter ();
3784 debug_printf ("linux_resume:\n");
3785 }
3786
3787 find_inferior (&all_threads, linux_set_resume_request, &array);
3788
3789 /* If there is a thread which would otherwise be resumed, which has
3790 a pending status, then don't resume any threads - we can just
3791 report the pending status. Make sure to queue any signals that
3792 would otherwise be sent. In non-stop mode, we'll apply this
3793 logic to each thread individually. We consume all pending events
3794 before considering to start a step-over (in all-stop). */
3795 any_pending = 0;
3796 if (!non_stop)
3797 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3798
3799 /* If there is a thread which would otherwise be resumed, which is
3800 stopped at a breakpoint that needs stepping over, then don't
3801 resume any threads - have it step over the breakpoint with all
3802 other threads stopped, then resume all threads again. Make sure
3803 to queue any signals that would otherwise be delivered or
3804 queued. */
3805 if (!any_pending && supports_breakpoints ())
3806 need_step_over
3807 = (struct lwp_info *) find_inferior (&all_lwps,
3808 need_step_over_p, NULL);
3809
3810 leave_all_stopped = (need_step_over != NULL || any_pending);
3811
3812 if (debug_threads)
3813 {
3814 if (need_step_over != NULL)
3815 debug_printf ("Not resuming all, need step over\n");
3816 else if (any_pending)
3817 debug_printf ("Not resuming, all-stop and found "
3818 "an LWP with pending status\n");
3819 else
3820 debug_printf ("Resuming, no pending status or step over needed\n");
3821 }
3822
3823 /* Even if we're leaving threads stopped, queue all signals we'd
3824 otherwise deliver. */
3825 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3826
3827 if (need_step_over)
3828 start_step_over (need_step_over);
3829
3830 if (debug_threads)
3831 {
3832 debug_printf ("linux_resume done\n");
3833 debug_exit ();
3834 }
3835 }
3836
3837 /* This function is called once per thread. We check the thread's
3838 last resume request, which will tell us whether to resume, step, or
3839 leave the thread stopped. Any signal the client requested to be
3840 delivered has already been enqueued at this point.
3841
3842 If any thread that GDB wants running is stopped at an internal
3843 breakpoint that needs stepping over, we start a step-over operation
3844 on that particular thread, and leave all others stopped. */
3845
3846 static int
3847 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3848 {
3849 struct lwp_info *lwp = (struct lwp_info *) entry;
3850 struct thread_info *thread;
3851 int step;
3852
3853 if (lwp == except)
3854 return 0;
3855
3856 if (debug_threads)
3857 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3858
3859 if (!lwp->stopped)
3860 {
3861 if (debug_threads)
3862 debug_printf (" LWP %ld already running\n", lwpid_of (lwp));
3863 return 0;
3864 }
3865
3866 thread = get_lwp_thread (lwp);
3867
3868 if (thread->last_resume_kind == resume_stop
3869 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3870 {
3871 if (debug_threads)
3872 debug_printf (" client wants LWP %ld to remain stopped\n",
3873 lwpid_of (lwp));
3874 return 0;
3875 }
3876
3877 if (lwp->status_pending_p)
3878 {
3879 if (debug_threads)
3880 debug_printf (" LWP %ld has pending status, leaving stopped\n",
3881 lwpid_of (lwp));
3882 return 0;
3883 }
3884
3885 gdb_assert (lwp->suspended >= 0);
3886
3887 if (lwp->suspended)
3888 {
3889 if (debug_threads)
3890 debug_printf (" LWP %ld is suspended\n", lwpid_of (lwp));
3891 return 0;
3892 }
3893
3894 if (thread->last_resume_kind == resume_stop
3895 && lwp->pending_signals_to_report == NULL
3896 && lwp->collecting_fast_tracepoint == 0)
3897 {
3898 /* We haven't reported this LWP as stopped yet (otherwise, the
3899 last_status.kind check above would catch it, and we wouldn't
3900 reach here). This LWP may have been momentarily paused by a
3901 stop_all_lwps call while handling, for example, another LWP's
3902 step-over. In that case, the pending expected SIGSTOP signal
3903 that was queued at vCont;t handling time will have already
3904 been consumed by wait_for_sigstop, and so we need to requeue
3905 another one here. Note that if the LWP already has a SIGSTOP
3906 pending, this is a no-op. */
3907
3908 if (debug_threads)
3909 debug_printf ("Client wants LWP %ld to stop. "
3910 "Making sure it has a SIGSTOP pending\n",
3911 lwpid_of (lwp));
3912
3913 send_sigstop (lwp);
3914 }
3915
3916 step = thread->last_resume_kind == resume_step;
3917 linux_resume_one_lwp (lwp, step, 0, NULL);
3918 return 0;
3919 }
3920
3921 static int
3922 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3923 {
3924 struct lwp_info *lwp = (struct lwp_info *) entry;
3925
3926 if (lwp == except)
3927 return 0;
3928
3929 lwp->suspended--;
3930 gdb_assert (lwp->suspended >= 0);
3931
3932 return proceed_one_lwp (entry, except);
3933 }
3934
3935 /* When we finish a step-over, set threads running again. If there's
3936 another thread that may need a step-over, now's the time to start
3937 it. Eventually, we'll move all threads past their breakpoints. */
3938
3939 static void
3940 proceed_all_lwps (void)
3941 {
3942 struct lwp_info *need_step_over;
3943
3944 /* If there is a thread which would otherwise be resumed, which is
3945 stopped at a breakpoint that needs stepping over, then don't
3946 resume any threads - have it step over the breakpoint with all
3947 other threads stopped, then resume all threads again. */
3948
3949 if (supports_breakpoints ())
3950 {
3951 need_step_over
3952 = (struct lwp_info *) find_inferior (&all_lwps,
3953 need_step_over_p, NULL);
3954
3955 if (need_step_over != NULL)
3956 {
3957 if (debug_threads)
3958 debug_printf ("proceed_all_lwps: found "
3959 "thread %ld needing a step-over\n",
3960 lwpid_of (need_step_over));
3961
3962 start_step_over (need_step_over);
3963 return;
3964 }
3965 }
3966
3967 if (debug_threads)
3968 debug_printf ("Proceeding, no step-over needed\n");
3969
3970 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3971 }
3972
3973 /* Stopped LWPs that the client wanted running, and that have no
3974 pending statuses, are set to run again, except for EXCEPT (if
3975 non-NULL). This undoes a stop_all_lwps call. */
3976
3977 static void
3978 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3979 {
3980 if (debug_threads)
3981 {
3982 debug_enter ();
3983 if (except)
3984 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
3985 lwpid_of (except));
3986 else
3987 debug_printf ("unstopping all lwps\n");
3988 }
3989
3990 if (unsuspend)
3991 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3992 else
3993 find_inferior (&all_lwps, proceed_one_lwp, except);
3994
3995 if (debug_threads)
3996 {
3997 debug_printf ("unstop_all_lwps done\n");
3998 debug_exit ();
3999 }
4000 }
4001
4002
4003 #ifdef HAVE_LINUX_REGSETS
4004
4005 #define use_linux_regsets 1
4006
4007 /* Returns true if REGSET has been disabled. */
4008
4009 static int
4010 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4011 {
4012 return (info->disabled_regsets != NULL
4013 && info->disabled_regsets[regset - info->regsets]);
4014 }
4015
4016 /* Disable REGSET. */
4017
4018 static void
4019 disable_regset (struct regsets_info *info, struct regset_info *regset)
4020 {
4021 int dr_offset;
4022
4023 dr_offset = regset - info->regsets;
4024 if (info->disabled_regsets == NULL)
4025 info->disabled_regsets = xcalloc (1, info->num_regsets);
4026 info->disabled_regsets[dr_offset] = 1;
4027 }
4028
4029 static int
4030 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4031 struct regcache *regcache)
4032 {
4033 struct regset_info *regset;
4034 int saw_general_regs = 0;
4035 int pid;
4036 struct iovec iov;
4037
4038 regset = regsets_info->regsets;
4039
4040 pid = lwpid_of (get_thread_lwp (current_inferior));
4041 while (regset->size >= 0)
4042 {
4043 void *buf, *data;
4044 int nt_type, res;
4045
4046 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4047 {
4048 regset ++;
4049 continue;
4050 }
4051
4052 buf = xmalloc (regset->size);
4053
4054 nt_type = regset->nt_type;
4055 if (nt_type)
4056 {
4057 iov.iov_base = buf;
4058 iov.iov_len = regset->size;
4059 data = (void *) &iov;
4060 }
4061 else
4062 data = buf;
4063
4064 #ifndef __sparc__
4065 res = ptrace (regset->get_request, pid,
4066 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4067 #else
4068 res = ptrace (regset->get_request, pid, data, nt_type);
4069 #endif
4070 if (res < 0)
4071 {
4072 if (errno == EIO)
4073 {
4074 /* If we get EIO on a regset, do not try it again for this process mode. */
4075 disable_regset (regsets_info, regset);
4076 }
4077 else
4078 {
4079 char s[256];
4080 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4081 pid);
4082 perror (s);
4083 }
4084 }
4085 else
4086 {
4087 if (regset->type == GENERAL_REGS)
4088 saw_general_regs = 1;
4089 regset->store_function (regcache, buf);
4090 }
4091 regset ++;
4092 free (buf);
4093 }
4094 if (saw_general_regs)
4095 return 0;
4096 else
4097 return 1;
4098 }
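
/* For regsets with a non-zero NT_TYPE, the request goes through the
   iovec-based PTRACE_GETREGSET/PTRACE_SETREGSET interface, as set up
   above.  A minimal stand-alone sketch of such a fetch, assuming the
   kernel defines PTRACE_GETREGSET (the helper name is illustrative):  */

#ifdef PTRACE_GETREGSET
static long
example_fetch_regset (int pid, int nt_type, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  /* On success the kernel updates iov.iov_len to the number of bytes
     it actually wrote.  */
  return ptrace (PTRACE_GETREGSET, pid, (PTRACE_TYPE_ARG3) (long) nt_type,
		 &iov);
}
#endif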
4099
4100 static int
4101 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4102 struct regcache *regcache)
4103 {
4104 struct regset_info *regset;
4105 int saw_general_regs = 0;
4106 int pid;
4107 struct iovec iov;
4108
4109 regset = regsets_info->regsets;
4110
4111 pid = lwpid_of (get_thread_lwp (current_inferior));
4112 while (regset->size >= 0)
4113 {
4114 void *buf, *data;
4115 int nt_type, res;
4116
4117 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4118 {
4119 regset ++;
4120 continue;
4121 }
4122
4123 buf = xmalloc (regset->size);
4124
4125 /* First fill the buffer with the current register set contents,
4126 in case there are any items in the kernel's regset that are
4127 not in gdbserver's regcache. */
4128
4129 nt_type = regset->nt_type;
4130 if (nt_type)
4131 {
4132 iov.iov_base = buf;
4133 iov.iov_len = regset->size;
4134 data = (void *) &iov;
4135 }
4136 else
4137 data = buf;
4138
4139 #ifndef __sparc__
4140 res = ptrace (regset->get_request, pid,
4141 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4142 #else
4143 res = ptrace (regset->get_request, pid, data, nt_type);
4144 #endif
4145
4146 if (res == 0)
4147 {
4148 /* Then overlay our cached registers on that. */
4149 regset->fill_function (regcache, buf);
4150
4151 /* Only now do we write the register set. */
4152 #ifndef __sparc__
4153 res = ptrace (regset->set_request, pid,
4154 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4155 #else
4156 res = ptrace (regset->set_request, pid, data, nt_type);
4157 #endif
4158 }
4159
4160 if (res < 0)
4161 {
4162 if (errno == EIO)
4163 {
4164 /* If we get EIO on a regset, do not try it again for
4165 this process mode. */
4166 disable_regset (regsets_info, regset);
4167 free (buf);
4168 continue;
4169 }
4170 else if (errno == ESRCH)
4171 {
4172 /* At this point, ESRCH should mean the process is
4173 already gone, in which case we simply ignore attempts
4174 to change its registers. See also the related
4175 comment in linux_resume_one_lwp. */
4176 free (buf);
4177 return 0;
4178 }
4179 else
4180 {
4181 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4182 }
4183 }
4184 else if (regset->type == GENERAL_REGS)
4185 saw_general_regs = 1;
4186 regset ++;
4187 free (buf);
4188 }
4189 if (saw_general_regs)
4190 return 0;
4191 else
4192 return 1;
4193 }
4194
4195 #else /* !HAVE_LINUX_REGSETS */
4196
4197 #define use_linux_regsets 0
4198 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4199 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4200
4201 #endif
4202
4203 /* Return 1 if register REGNO is supported by one of the regset ptrace
4204 calls or 0 if it has to be transferred individually. */
4205
4206 static int
4207 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4208 {
4209 unsigned char mask = 1 << (regno % 8);
4210 size_t index = regno / 8;
4211
4212 return (use_linux_regsets
4213 && (regs_info->regset_bitmap == NULL
4214 || (regs_info->regset_bitmap[index] & mask) != 0));
4215 }
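
/* For example, REGNO == 10 gives index = 10 / 8 = 1 and
   mask = 1 << (10 % 8) = 0x04, so the test above reads bit 2 of
   regset_bitmap[1].  A NULL bitmap means every register is covered
   by some regset.  */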
4216
4217 #ifdef HAVE_LINUX_USRREGS
4218
4219 int
4220 register_addr (const struct usrregs_info *usrregs, int regnum)
4221 {
4222 int addr;
4223
4224 if (regnum < 0 || regnum >= usrregs->num_regs)
4225 error ("Invalid register number %d.", regnum);
4226
4227 addr = usrregs->regmap[regnum];
4228
4229 return addr;
4230 }
4231
4232 /* Fetch one register. */
4233 static void
4234 fetch_register (const struct usrregs_info *usrregs,
4235 struct regcache *regcache, int regno)
4236 {
4237 CORE_ADDR regaddr;
4238 int i, size;
4239 char *buf;
4240 int pid;
4241
4242 if (regno >= usrregs->num_regs)
4243 return;
4244 if ((*the_low_target.cannot_fetch_register) (regno))
4245 return;
4246
4247 regaddr = register_addr (usrregs, regno);
4248 if (regaddr == -1)
4249 return;
4250
4251 size = ((register_size (regcache->tdesc, regno)
4252 + sizeof (PTRACE_XFER_TYPE) - 1)
4253 & -sizeof (PTRACE_XFER_TYPE));
4254 buf = alloca (size);
4255
4256 pid = lwpid_of (get_thread_lwp (current_inferior));
4257 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4258 {
4259 errno = 0;
4260 *(PTRACE_XFER_TYPE *) (buf + i) =
4261 ptrace (PTRACE_PEEKUSER, pid,
4262 /* Coerce to a uintptr_t first to avoid potential gcc warning
4263 of coercing an 8 byte integer to a 4 byte pointer. */
4264 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4265 regaddr += sizeof (PTRACE_XFER_TYPE);
4266 if (errno != 0)
4267 error ("reading register %d: %s", regno, strerror (errno));
4268 }
4269
4270 if (the_low_target.supply_ptrace_register)
4271 the_low_target.supply_ptrace_register (regcache, regno, buf);
4272 else
4273 supply_register (regcache, regno, buf);
4274 }
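
/* PTRACE_PEEKUSER returns the peeked word in the ptrace return value,
   so errno must be cleared beforehand to tell a legitimate all-ones
   word from an error, as done in the loop above.  A stand-alone
   sketch of reading one word from the USER area (the helper name is
   illustrative; REGADDR would come from the target's regmap):  */

static long
example_peek_user_word (int pid, CORE_ADDR regaddr, int *err)
{
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKUSER, pid,
		 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
		 (PTRACE_TYPE_ARG4) 0);
  *err = errno;
  return word;
}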
4275
4276 /* Store one register. */
4277 static void
4278 store_register (const struct usrregs_info *usrregs,
4279 struct regcache *regcache, int regno)
4280 {
4281 CORE_ADDR regaddr;
4282 int i, size;
4283 char *buf;
4284 int pid;
4285
4286 if (regno >= usrregs->num_regs)
4287 return;
4288 if ((*the_low_target.cannot_store_register) (regno))
4289 return;
4290
4291 regaddr = register_addr (usrregs, regno);
4292 if (regaddr == -1)
4293 return;
4294
4295 size = ((register_size (regcache->tdesc, regno)
4296 + sizeof (PTRACE_XFER_TYPE) - 1)
4297 & -sizeof (PTRACE_XFER_TYPE));
4298 buf = alloca (size);
4299 memset (buf, 0, size);
4300
4301 if (the_low_target.collect_ptrace_register)
4302 the_low_target.collect_ptrace_register (regcache, regno, buf);
4303 else
4304 collect_register (regcache, regno, buf);
4305
4306 pid = lwpid_of (get_thread_lwp (current_inferior));
4307 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4308 {
4309 errno = 0;
4310 ptrace (PTRACE_POKEUSER, pid,
4311 /* Coerce to a uintptr_t first to avoid potential gcc warning
4312 about coercing an 8 byte integer to a 4 byte pointer. */
4313 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4314 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4315 if (errno != 0)
4316 {
4317 /* At this point, ESRCH should mean the process is
4318 already gone, in which case we simply ignore attempts
4319 to change its registers. See also the related
4320 comment in linux_resume_one_lwp. */
4321 if (errno == ESRCH)
4322 return;
4323
4324 if ((*the_low_target.cannot_store_register) (regno) == 0)
4325 error ("writing register %d: %s", regno, strerror (errno));
4326 }
4327 regaddr += sizeof (PTRACE_XFER_TYPE);
4328 }
4329 }
4330
4331 /* Fetch all registers, or just one, from the child process.
4332 If REGNO is -1, do this for all registers, skipping any that are
4333 assumed to have been retrieved by regsets_fetch_inferior_registers,
4334 unless ALL is non-zero.
4335 Otherwise, REGNO specifies which register (so we can save time). */
4336 static void
4337 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4338 struct regcache *regcache, int regno, int all)
4339 {
4340 struct usrregs_info *usr = regs_info->usrregs;
4341
4342 if (regno == -1)
4343 {
4344 for (regno = 0; regno < usr->num_regs; regno++)
4345 if (all || !linux_register_in_regsets (regs_info, regno))
4346 fetch_register (usr, regcache, regno);
4347 }
4348 else
4349 fetch_register (usr, regcache, regno);
4350 }
4351
4352 /* Store our register values back into the inferior.
4353 If REGNO is -1, do this for all registers, skipping any that are
4354 assumed to have been saved by regsets_store_inferior_registers,
4355 unless ALL is non-zero.
4356 Otherwise, REGNO specifies which register (so we can save time). */
4357 static void
4358 usr_store_inferior_registers (const struct regs_info *regs_info,
4359 struct regcache *regcache, int regno, int all)
4360 {
4361 struct usrregs_info *usr = regs_info->usrregs;
4362
4363 if (regno == -1)
4364 {
4365 for (regno = 0; regno < usr->num_regs; regno++)
4366 if (all || !linux_register_in_regsets (regs_info, regno))
4367 store_register (usr, regcache, regno);
4368 }
4369 else
4370 store_register (usr, regcache, regno);
4371 }
4372
4373 #else /* !HAVE_LINUX_USRREGS */
4374
4375 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4376 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4377
4378 #endif
4379
4380
4381 void
4382 linux_fetch_registers (struct regcache *regcache, int regno)
4383 {
4384 int use_regsets;
4385 int all = 0;
4386 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4387
4388 if (regno == -1)
4389 {
4390 if (the_low_target.fetch_register != NULL
4391 && regs_info->usrregs != NULL)
4392 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4393 (*the_low_target.fetch_register) (regcache, regno);
4394
4395 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4396 if (regs_info->usrregs != NULL)
4397 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4398 }
4399 else
4400 {
4401 if (the_low_target.fetch_register != NULL
4402 && (*the_low_target.fetch_register) (regcache, regno))
4403 return;
4404
4405 use_regsets = linux_register_in_regsets (regs_info, regno);
4406 if (use_regsets)
4407 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4408 regcache);
4409 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4410 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4411 }
4412 }
4413
4414 void
4415 linux_store_registers (struct regcache *regcache, int regno)
4416 {
4417 int use_regsets;
4418 int all = 0;
4419 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4420
4421 if (regno == -1)
4422 {
4423 all = regsets_store_inferior_registers (regs_info->regsets_info,
4424 regcache);
4425 if (regs_info->usrregs != NULL)
4426 usr_store_inferior_registers (regs_info, regcache, regno, all);
4427 }
4428 else
4429 {
4430 use_regsets = linux_register_in_regsets (regs_info, regno);
4431 if (use_regsets)
4432 all = regsets_store_inferior_registers (regs_info->regsets_info,
4433 regcache);
4434 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4435 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4436 }
4437 }
4438
4439
4440 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4441 to debugger memory starting at MYADDR. */
4442
4443 static int
4444 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4445 {
4446 int pid = lwpid_of (get_thread_lwp (current_inferior));
4447 register PTRACE_XFER_TYPE *buffer;
4448 register CORE_ADDR addr;
4449 register int count;
4450 char filename[64];
4451 register int i;
4452 int ret;
4453 int fd;
4454
4455 /* Try using /proc. Don't bother for one word. */
4456 if (len >= 3 * sizeof (long))
4457 {
4458 int bytes;
4459
4460 /* We could keep this file open and cache it - possibly one per
4461 thread. That requires some juggling, but is even faster. */
4462 sprintf (filename, "/proc/%d/mem", pid);
4463 fd = open (filename, O_RDONLY | O_LARGEFILE);
4464 if (fd == -1)
4465 goto no_proc;
4466
4467 /* If pread64 is available, use it. It's faster if the kernel
4468 supports it (only one syscall), and it's 64-bit safe even on
4469 32-bit platforms (for instance, SPARC debugging a SPARC64
4470 application). */
4471 #ifdef HAVE_PREAD64
4472 bytes = pread64 (fd, myaddr, len, memaddr);
4473 #else
4474 bytes = -1;
4475 if (lseek (fd, memaddr, SEEK_SET) != -1)
4476 bytes = read (fd, myaddr, len);
4477 #endif
4478
4479 close (fd);
4480 if (bytes == len)
4481 return 0;
4482
4483 /* Some data was read, we'll try to get the rest with ptrace. */
4484 if (bytes > 0)
4485 {
4486 memaddr += bytes;
4487 myaddr += bytes;
4488 len -= bytes;
4489 }
4490 }
4491
4492 no_proc:
4493 /* Round starting address down to longword boundary. */
4494 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4495 /* Round ending address up; get number of longwords that makes. */
4496 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4497 / sizeof (PTRACE_XFER_TYPE));
4498 /* Allocate buffer of that many longwords. */
4499 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4500
4501 /* Read all the longwords */
4502 errno = 0;
4503 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4504 {
4505 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4506 about coercing an 8 byte integer to a 4 byte pointer. */
4507 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4508 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4509 (PTRACE_TYPE_ARG4) 0);
4510 if (errno)
4511 break;
4512 }
4513 ret = errno;
4514
4515 /* Copy appropriate bytes out of the buffer. */
4516 if (i > 0)
4517 {
4518 i *= sizeof (PTRACE_XFER_TYPE);
4519 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4520 memcpy (myaddr,
4521 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4522 i < len ? i : len);
4523 }
4524
4525 return ret;
4526 }
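
/* For example, with memaddr == 0x1003, len == 6 and an 8-byte
   PTRACE_XFER_TYPE: addr rounds down to 0x1000, count becomes
   ((0x1009 - 0x1000) + 7) / 8 == 2, the loop peeks two longwords,
   and the final memcpy skips the first 3 bytes of the buffer and
   copies 6 bytes to MYADDR.  */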
4527
4528 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4529 memory at MEMADDR. On failure (cannot write to the inferior)
4530 returns the value of errno. Always succeeds if LEN is zero. */
4531
4532 static int
4533 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4534 {
4535 register int i;
4536 /* Round starting address down to longword boundary. */
4537 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4538 /* Round ending address up; get number of longwords that makes. */
4539 register int count
4540 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4541 / sizeof (PTRACE_XFER_TYPE);
4542
4543 /* Allocate buffer of that many longwords. */
4544 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4545 alloca (count * sizeof (PTRACE_XFER_TYPE));
4546
4547 int pid = lwpid_of (get_thread_lwp (current_inferior));
4548
4549 if (len == 0)
4550 {
4551 /* Zero length write always succeeds. */
4552 return 0;
4553 }
4554
4555 if (debug_threads)
4556 {
4557 /* Dump up to four bytes. */
4558 unsigned int val = * (unsigned int *) myaddr;
4559 if (len == 1)
4560 val = val & 0xff;
4561 else if (len == 2)
4562 val = val & 0xffff;
4563 else if (len == 3)
4564 val = val & 0xffffff;
4565 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4566 val, (long)memaddr);
4567 }
4568
4569 /* Fill start and end extra bytes of buffer with existing memory data. */
4570
4571 errno = 0;
4572 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4573 about coercing an 8 byte integer to a 4 byte pointer. */
4574 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4575 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4576 (PTRACE_TYPE_ARG4) 0);
4577 if (errno)
4578 return errno;
4579
4580 if (count > 1)
4581 {
4582 errno = 0;
4583 buffer[count - 1]
4584 = ptrace (PTRACE_PEEKTEXT, pid,
4585 /* Coerce to a uintptr_t first to avoid potential gcc warning
4586 about coercing an 8 byte integer to a 4 byte pointer. */
4587 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4588 * sizeof (PTRACE_XFER_TYPE)),
4589 (PTRACE_TYPE_ARG4) 0);
4590 if (errno)
4591 return errno;
4592 }
4593
4594 /* Copy data to be written over corresponding part of buffer. */
4595
4596 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4597 myaddr, len);
4598
4599 /* Write the entire buffer. */
4600
4601 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4602 {
4603 errno = 0;
4604 ptrace (PTRACE_POKETEXT, pid,
4605 /* Coerce to a uintptr_t first to avoid potential gcc warning
4606 about coercing an 8 byte integer to a 4 byte pointer. */
4607 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4608 (PTRACE_TYPE_ARG4) buffer[i]);
4609 if (errno)
4610 return errno;
4611 }
4612
4613 return 0;
4614 }
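
/* For example, with memaddr == 0x2006, len == 4 and an 8-byte
   PTRACE_XFER_TYPE: addr rounds down to 0x2000 and count becomes
   ((0x200a - 0x2000) + 7) / 8 == 2, so both longwords are first
   peeked to preserve the bytes around the write, the 4 payload bytes
   are copied in starting at offset 6, and both longwords are poked
   back.  */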
4615
4616 static void
4617 linux_look_up_symbols (void)
4618 {
4619 #ifdef USE_THREAD_DB
4620 struct process_info *proc = current_process ();
4621
4622 if (proc->private->thread_db != NULL)
4623 return;
4624
4625 /* If the kernel supports tracing clones, then we don't need to
4626 use the magic thread event breakpoint to learn about
4627 threads. */
4628 thread_db_init (!linux_supports_traceclone ());
4629 #endif
4630 }
4631
4632 static void
4633 linux_request_interrupt (void)
4634 {
4635 extern unsigned long signal_pid;
4636
4637 if (!ptid_equal (cont_thread, null_ptid)
4638 && !ptid_equal (cont_thread, minus_one_ptid))
4639 {
4640 struct lwp_info *lwp;
4641 int lwpid;
4642
4643 lwp = get_thread_lwp (current_inferior);
4644 lwpid = lwpid_of (lwp);
4645 kill_lwp (lwpid, SIGINT);
4646 }
4647 else
4648 kill_lwp (signal_pid, SIGINT);
4649 }
4650
4651 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4652 to debugger memory starting at MYADDR. */
4653
4654 static int
4655 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4656 {
4657 char filename[PATH_MAX];
4658 int fd, n;
4659 int pid = lwpid_of (get_thread_lwp (current_inferior));
4660
4661 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4662
4663 fd = open (filename, O_RDONLY);
4664 if (fd < 0)
4665 return -1;
4666
4667 if (offset != (CORE_ADDR) 0
4668 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4669 n = -1;
4670 else
4671 n = read (fd, myaddr, len);
4672
4673 close (fd);
4674
4675 return n;
4676 }
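
/* The raw bytes returned above are an array of auxv records.  A
   sketch of scanning them on a 64-bit target (illustrative helper,
   assuming Elf64_auxv_t as declared above or in <elf.h>):  */

static CORE_ADDR
example_find_auxv_entry (const unsigned char *buf, int len,
			 unsigned long type)
{
  const Elf64_auxv_t *aux = (const Elf64_auxv_t *) buf;
  const Elf64_auxv_t *end = aux + len / sizeof (Elf64_auxv_t);

  for (; aux < end; aux++)
    if (aux->a_type == type)
      return aux->a_un.a_val;
  return 0;
}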
4677
4678 /* These breakpoint and watchpoint related wrapper functions simply
4679 pass on the function call if the target has registered a
4680 corresponding function. */
4681
4682 static int
4683 linux_insert_point (char type, CORE_ADDR addr, int len)
4684 {
4685 if (the_low_target.insert_point != NULL)
4686 return the_low_target.insert_point (type, addr, len);
4687 else
4688 /* Unsupported (see target.h). */
4689 return 1;
4690 }
4691
4692 static int
4693 linux_remove_point (char type, CORE_ADDR addr, int len)
4694 {
4695 if (the_low_target.remove_point != NULL)
4696 return the_low_target.remove_point (type, addr, len);
4697 else
4698 /* Unsupported (see target.h). */
4699 return 1;
4700 }
4701
4702 static int
4703 linux_stopped_by_watchpoint (void)
4704 {
4705 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4706
4707 return lwp->stopped_by_watchpoint;
4708 }
4709
4710 static CORE_ADDR
4711 linux_stopped_data_address (void)
4712 {
4713 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4714
4715 return lwp->stopped_data_address;
4716 }
4717
4718 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4719 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4720 && defined(PT_TEXT_END_ADDR)
4721
4722 /* This is only used for targets that define PT_TEXT_ADDR,
4723 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4724 the target has different ways of acquiring this information, like
4725 loadmaps. */
4726
4727 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4728 to tell gdb about. */
4729
4730 static int
4731 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4732 {
4733 unsigned long text, text_end, data;
4734 int pid = lwpid_of (get_thread_lwp (current_inferior));
4735
4736 errno = 0;
4737
4738 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4739 (PTRACE_TYPE_ARG4) 0);
4740 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4741 (PTRACE_TYPE_ARG4) 0);
4742 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4743 (PTRACE_TYPE_ARG4) 0);
4744
4745 if (errno == 0)
4746 {
4747 /* Both text and data offsets produced at compile-time (and so
4748 used by gdb) are relative to the beginning of the program,
4749 with the data segment immediately following the text segment.
4750 However, the actual runtime layout in memory may put the data
4751 somewhere else, so when we send gdb a data base-address, we
4752 use the real data base address and subtract the compile-time
4753 data base-address from it (which is just the length of the
4754 text segment). BSS immediately follows data in both
4755 cases. */
4756 *text_p = text;
4757 *data_p = data - (text_end - text);
4758
4759 return 1;
4760 }
4761 return 0;
4762 }
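
/* For example, if the kernel reports text == 0x10000,
   text_end == 0x18000 and data == 0x40000, gdb is told
   *text_p == 0x10000 and *data_p == 0x40000 - 0x8000 == 0x38000,
   so that compile-time data offsets (which follow a text segment of
   size 0x8000) resolve to the right runtime addresses.  */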
4763 #endif
4764
4765 static int
4766 linux_qxfer_osdata (const char *annex,
4767 unsigned char *readbuf, unsigned const char *writebuf,
4768 CORE_ADDR offset, int len)
4769 {
4770 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4771 }
4772
4773 /* Convert a native/host siginfo object into/from the siginfo in the
4774 layout of the inferior's architecture. */
4775
4776 static void
4777 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4778 {
4779 int done = 0;
4780
4781 if (the_low_target.siginfo_fixup != NULL)
4782 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4783
4784 /* If there was no callback, or the callback didn't do anything,
4785 then just do a straight memcpy. */
4786 if (!done)
4787 {
4788 if (direction == 1)
4789 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4790 else
4791 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4792 }
4793 }
4794
4795 static int
4796 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4797 unsigned const char *writebuf, CORE_ADDR offset, int len)
4798 {
4799 int pid;
4800 siginfo_t siginfo;
4801 char inf_siginfo[sizeof (siginfo_t)];
4802
4803 if (current_inferior == NULL)
4804 return -1;
4805
4806 pid = lwpid_of (get_thread_lwp (current_inferior));
4807
4808 if (debug_threads)
4809 debug_printf ("%s siginfo for lwp %d.\n",
4810 readbuf != NULL ? "Reading" : "Writing",
4811 pid);
4812
4813 if (offset >= sizeof (siginfo))
4814 return -1;
4815
4816 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4817 return -1;
4818
4819 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4820 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4821 inferior with a 64-bit GDBSERVER should look the same as debugging it
4822 with a 32-bit GDBSERVER, we need to convert it. */
4823 siginfo_fixup (&siginfo, inf_siginfo, 0);
4824
4825 if (offset + len > sizeof (siginfo))
4826 len = sizeof (siginfo) - offset;
4827
4828 if (readbuf != NULL)
4829 memcpy (readbuf, inf_siginfo + offset, len);
4830 else
4831 {
4832 memcpy (inf_siginfo + offset, writebuf, len);
4833
4834 /* Convert back to ptrace layout before flushing it out. */
4835 siginfo_fixup (&siginfo, inf_siginfo, 1);
4836
4837 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4838 return -1;
4839 }
4840
4841 return len;
4842 }
4843
4844 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4845 it lets us notice when children change state; it also serves as
4846 the handler for the sigsuspend in my_waitpid. */
4847
4848 static void
4849 sigchld_handler (int signo)
4850 {
4851 int old_errno = errno;
4852
4853 if (debug_threads)
4854 {
4855 do
4856 {
4857 /* fprintf is not async-signal-safe, so call write
4858 directly. */
4859 if (write (2, "sigchld_handler\n",
4860 sizeof ("sigchld_handler\n") - 1) < 0)
4861 break; /* just ignore */
4862 } while (0);
4863 }
4864
4865 if (target_is_async_p ())
4866 async_file_mark (); /* trigger a linux_wait */
4867
4868 errno = old_errno;
4869 }
4870
4871 static int
4872 linux_supports_non_stop (void)
4873 {
4874 return 1;
4875 }
4876
4877 static int
4878 linux_async (int enable)
4879 {
4880 int previous = (linux_event_pipe[0] != -1);
4881
4882 if (debug_threads)
4883 debug_printf ("linux_async (%d), previous=%d\n",
4884 enable, previous);
4885
4886 if (previous != enable)
4887 {
4888 sigset_t mask;
4889 sigemptyset (&mask);
4890 sigaddset (&mask, SIGCHLD);
4891
4892 sigprocmask (SIG_BLOCK, &mask, NULL);
4893
4894 if (enable)
4895 {
4896 if (pipe (linux_event_pipe) == -1)
4897 fatal ("creating event pipe failed.");
4898
4899 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4900 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4901
4902 /* Register the event loop handler. */
4903 add_file_handler (linux_event_pipe[0],
4904 handle_target_event, NULL);
4905
4906 /* Always trigger a linux_wait. */
4907 async_file_mark ();
4908 }
4909 else
4910 {
4911 delete_file_handler (linux_event_pipe[0]);
4912
4913 close (linux_event_pipe[0]);
4914 close (linux_event_pipe[1]);
4915 linux_event_pipe[0] = -1;
4916 linux_event_pipe[1] = -1;
4917 }
4918
4919 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4920 }
4921
4922 return previous;
4923 }
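
/* linux_event_pipe is an instance of the classic self-pipe trick:
   the async-signal-safe SIGCHLD handler writes a byte into the write
   end (async_file_mark), and the event loop watches the read end, so
   a signal becomes an ordinary file-descriptor event.  A stand-alone
   sketch of the marking side, with illustrative names:  */

static int example_event_pipe[2] = { -1, -1 };

static void
example_async_mark (void)
{
  /* write is async-signal-safe; the pipe is O_NONBLOCK, so when it
     is already full the redundant wakeup byte is simply dropped.  */
  if (write (example_event_pipe[1], "+", 1) < 0)
    return;
}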
4924
4925 static int
4926 linux_start_non_stop (int nonstop)
4927 {
4928 /* Register or unregister from event-loop accordingly. */
4929 linux_async (nonstop);
4930 return 0;
4931 }
4932
4933 static int
4934 linux_supports_multi_process (void)
4935 {
4936 return 1;
4937 }
4938
4939 static int
4940 linux_supports_disable_randomization (void)
4941 {
4942 #ifdef HAVE_PERSONALITY
4943 return 1;
4944 #else
4945 return 0;
4946 #endif
4947 }
4948
4949 static int
4950 linux_supports_agent (void)
4951 {
4952 return 1;
4953 }
4954
4955 static int
4956 linux_supports_range_stepping (void)
4957 {
4958 if (*the_low_target.supports_range_stepping == NULL)
4959 return 0;
4960
4961 return (*the_low_target.supports_range_stepping) ();
4962 }
4963
4964 /* Enumerate spufs IDs for process PID. */
4965 static int
4966 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4967 {
4968 int pos = 0;
4969 int written = 0;
4970 char path[128];
4971 DIR *dir;
4972 struct dirent *entry;
4973
4974 sprintf (path, "/proc/%ld/fd", pid);
4975 dir = opendir (path);
4976 if (!dir)
4977 return -1;
4978
4979 rewinddir (dir);
4980 while ((entry = readdir (dir)) != NULL)
4981 {
4982 struct stat st;
4983 struct statfs stfs;
4984 int fd;
4985
4986 fd = atoi (entry->d_name);
4987 if (!fd)
4988 continue;
4989
4990 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4991 if (stat (path, &st) != 0)
4992 continue;
4993 if (!S_ISDIR (st.st_mode))
4994 continue;
4995
4996 if (statfs (path, &stfs) != 0)
4997 continue;
4998 if (stfs.f_type != SPUFS_MAGIC)
4999 continue;
5000
5001 if (pos >= offset && pos + 4 <= offset + len)
5002 {
5003 *(unsigned int *)(buf + pos - offset) = fd;
5004 written += 4;
5005 }
5006 pos += 4;
5007 }
5008
5009 closedir (dir);
5010 return written;
5011 }
5012
5013 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5014 object type, using the /proc file system. */
5015 static int
5016 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5017 unsigned const char *writebuf,
5018 CORE_ADDR offset, int len)
5019 {
5020 long pid = lwpid_of (get_thread_lwp (current_inferior));
5021 char buf[128];
5022 int fd = 0;
5023 int ret = 0;
5024
5025 if (!writebuf && !readbuf)
5026 return -1;
5027
5028 if (!*annex)
5029 {
5030 if (!readbuf)
5031 return -1;
5032 else
5033 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5034 }
5035
5036 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5037 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5038 if (fd <= 0)
5039 return -1;
5040
5041 if (offset != 0
5042 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5043 {
5044 close (fd);
5045 return 0;
5046 }
5047
5048 if (writebuf)
5049 ret = write (fd, writebuf, (size_t) len);
5050 else
5051 ret = read (fd, readbuf, (size_t) len);
5052
5053 close (fd);
5054 return ret;
5055 }
5056
5057 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5058 struct target_loadseg
5059 {
5060 /* Core address to which the segment is mapped. */
5061 Elf32_Addr addr;
5062 /* VMA recorded in the program header. */
5063 Elf32_Addr p_vaddr;
5064 /* Size of this segment in memory. */
5065 Elf32_Word p_memsz;
5066 };
5067
5068 # if defined PT_GETDSBT
5069 struct target_loadmap
5070 {
5071 /* Protocol version number, must be zero. */
5072 Elf32_Word version;
5073 /* Pointer to the DSBT table, its size, and the DSBT index. */
5074 unsigned *dsbt_table;
5075 unsigned dsbt_size, dsbt_index;
5076 /* Number of segments in this map. */
5077 Elf32_Word nsegs;
5078 /* The actual memory map. */
5079 struct target_loadseg segs[/*nsegs*/];
5080 };
5081 # define LINUX_LOADMAP PT_GETDSBT
5082 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5083 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5084 # else
5085 struct target_loadmap
5086 {
5087 /* Protocol version number, must be zero. */
5088 Elf32_Half version;
5089 /* Number of segments in this map. */
5090 Elf32_Half nsegs;
5091 /* The actual memory map. */
5092 struct target_loadseg segs[/*nsegs*/];
5093 };
5094 # define LINUX_LOADMAP PTRACE_GETFDPIC
5095 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5096 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5097 # endif
5098
5099 static int
5100 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5101 unsigned char *myaddr, unsigned int len)
5102 {
5103 int pid = lwpid_of (get_thread_lwp (current_inferior));
5104 int addr = -1;
5105 struct target_loadmap *data = NULL;
5106 unsigned int actual_length, copy_length;
5107
5108 if (strcmp (annex, "exec") == 0)
5109 addr = (int) LINUX_LOADMAP_EXEC;
5110 else if (strcmp (annex, "interp") == 0)
5111 addr = (int) LINUX_LOADMAP_INTERP;
5112 else
5113 return -1;
5114
5115 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5116 return -1;
5117
5118 if (data == NULL)
5119 return -1;
5120
5121 actual_length = sizeof (struct target_loadmap)
5122 + sizeof (struct target_loadseg) * data->nsegs;
5123
5124 if (offset < 0 || offset > actual_length)
5125 return -1;
5126
5127 copy_length = actual_length - offset < len ? actual_length - offset : len;
5128 memcpy (myaddr, (char *) data + offset, copy_length);
5129 return copy_length;
5130 }
5131 #else
5132 # define linux_read_loadmap NULL
5133 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5134
5135 static void
5136 linux_process_qsupported (const char *query)
5137 {
5138 if (the_low_target.process_qsupported != NULL)
5139 the_low_target.process_qsupported (query);
5140 }
5141
5142 static int
5143 linux_supports_tracepoints (void)
5144 {
5145 if (*the_low_target.supports_tracepoints == NULL)
5146 return 0;
5147
5148 return (*the_low_target.supports_tracepoints) ();
5149 }
5150
5151 static CORE_ADDR
5152 linux_read_pc (struct regcache *regcache)
5153 {
5154 if (the_low_target.get_pc == NULL)
5155 return 0;
5156
5157 return (*the_low_target.get_pc) (regcache);
5158 }
5159
5160 static void
5161 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5162 {
5163 gdb_assert (the_low_target.set_pc != NULL);
5164
5165 (*the_low_target.set_pc) (regcache, pc);
5166 }
5167
5168 static int
5169 linux_thread_stopped (struct thread_info *thread)
5170 {
5171 return get_thread_lwp (thread)->stopped;
5172 }
5173
5174 /* This exposes stop-all-threads functionality to other modules. */
5175
5176 static void
5177 linux_pause_all (int freeze)
5178 {
5179 stop_all_lwps (freeze, NULL);
5180 }
5181
5182 /* This exposes unstop-all-threads functionality to other gdbserver
5183 modules. */
5184
5185 static void
5186 linux_unpause_all (int unfreeze)
5187 {
5188 unstop_all_lwps (unfreeze, NULL);
5189 }
5190
5191 static int
5192 linux_prepare_to_access_memory (void)
5193 {
5194 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5195 running LWP. */
5196 if (non_stop)
5197 linux_pause_all (1);
5198 return 0;
5199 }
5200
5201 static void
5202 linux_done_accessing_memory (void)
5203 {
5204 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5205 running LWP. */
5206 if (non_stop)
5207 linux_unpause_all (1);
5208 }
5209
5210 static int
5211 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5212 CORE_ADDR collector,
5213 CORE_ADDR lockaddr,
5214 ULONGEST orig_size,
5215 CORE_ADDR *jump_entry,
5216 CORE_ADDR *trampoline,
5217 ULONGEST *trampoline_size,
5218 unsigned char *jjump_pad_insn,
5219 ULONGEST *jjump_pad_insn_size,
5220 CORE_ADDR *adjusted_insn_addr,
5221 CORE_ADDR *adjusted_insn_addr_end,
5222 char *err)
5223 {
5224 return (*the_low_target.install_fast_tracepoint_jump_pad)
5225 (tpoint, tpaddr, collector, lockaddr, orig_size,
5226 jump_entry, trampoline, trampoline_size,
5227 jjump_pad_insn, jjump_pad_insn_size,
5228 adjusted_insn_addr, adjusted_insn_addr_end,
5229 err);
5230 }
5231
5232 static struct emit_ops *
5233 linux_emit_ops (void)
5234 {
5235 if (the_low_target.emit_ops != NULL)
5236 return (*the_low_target.emit_ops) ();
5237 else
5238 return NULL;
5239 }
5240
5241 static int
5242 linux_get_min_fast_tracepoint_insn_len (void)
5243 {
5244 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5245 }
5246
5247 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5248
5249 static int
5250 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5251 CORE_ADDR *phdr_memaddr, int *num_phdr)
5252 {
5253 char filename[PATH_MAX];
5254 int fd;
5255 const int auxv_size = is_elf64
5256 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5257 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5258
5259 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5260
5261 fd = open (filename, O_RDONLY);
5262 if (fd < 0)
5263 return 1;
5264
5265 *phdr_memaddr = 0;
5266 *num_phdr = 0;
5267 while (read (fd, buf, auxv_size) == auxv_size
5268 && (*phdr_memaddr == 0 || *num_phdr == 0))
5269 {
5270 if (is_elf64)
5271 {
5272 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5273
5274 switch (aux->a_type)
5275 {
5276 case AT_PHDR:
5277 *phdr_memaddr = aux->a_un.a_val;
5278 break;
5279 case AT_PHNUM:
5280 *num_phdr = aux->a_un.a_val;
5281 break;
5282 }
5283 }
5284 else
5285 {
5286 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5287
5288 switch (aux->a_type)
5289 {
5290 case AT_PHDR:
5291 *phdr_memaddr = aux->a_un.a_val;
5292 break;
5293 case AT_PHNUM:
5294 *num_phdr = aux->a_un.a_val;
5295 break;
5296 }
5297 }
5298 }
5299
5300 close (fd);
5301
5302 if (*phdr_memaddr == 0 || *num_phdr == 0)
5303 {
5304 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5305 "phdr_memaddr = %ld, phdr_num = %d",
5306 (long) *phdr_memaddr, *num_phdr);
5307 return 2;
5308 }
5309
5310 return 0;
5311 }
5312
5313 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5314
5315 static CORE_ADDR
5316 get_dynamic (const int pid, const int is_elf64)
5317 {
5318 CORE_ADDR phdr_memaddr, relocation;
5319 int num_phdr, i;
5320 unsigned char *phdr_buf;
5321 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5322
5323 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5324 return 0;
5325
5326 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5327 phdr_buf = alloca (num_phdr * phdr_size);
5328
5329 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5330 return 0;
5331
5332 /* Compute relocation: it is expected to be 0 for "regular" executables,
5333 non-zero for PIE ones. */
5334 relocation = -1;
5335 for (i = 0; relocation == -1 && i < num_phdr; i++)
5336 if (is_elf64)
5337 {
5338 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5339
5340 if (p->p_type == PT_PHDR)
5341 relocation = phdr_memaddr - p->p_vaddr;
5342 }
5343 else
5344 {
5345 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5346
5347 if (p->p_type == PT_PHDR)
5348 relocation = phdr_memaddr - p->p_vaddr;
5349 }
5350
5351 if (relocation == -1)
5352 {
5353 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5354 real-world executables, including PIE executables, always have
5355 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5356 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5357 provides DT_DEBUG anyway (fpc binaries are statically linked).
5358
5359 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
5360
5361 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
5362
5363 return 0;
5364 }
5365
5366 for (i = 0; i < num_phdr; i++)
5367 {
5368 if (is_elf64)
5369 {
5370 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5371
5372 if (p->p_type == PT_DYNAMIC)
5373 return p->p_vaddr + relocation;
5374 }
5375 else
5376 {
5377 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5378
5379 if (p->p_type == PT_DYNAMIC)
5380 return p->p_vaddr + relocation;
5381 }
5382 }
5383
5384 return 0;
5385 }
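
/* For example, for a PIE whose program headers sit at p_vaddr 0x40
   and which the kernel loaded at 0x555555554000, the auxv gives
   phdr_memaddr == 0x555555554040, so relocation ==
   0x555555554040 - 0x40 == 0x555555554000; for a fixed-position
   executable the same computation yields 0.  */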
5386
5387 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5388 can be 0 if the inferior does not yet have the library list initialized.
5389 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5390 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5391
5392 static CORE_ADDR
5393 get_r_debug (const int pid, const int is_elf64)
5394 {
5395 CORE_ADDR dynamic_memaddr;
5396 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5397 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5398 CORE_ADDR map = -1;
5399
5400 dynamic_memaddr = get_dynamic (pid, is_elf64);
5401 if (dynamic_memaddr == 0)
5402 return map;
5403
5404 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5405 {
5406 if (is_elf64)
5407 {
5408 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5409 #ifdef DT_MIPS_RLD_MAP
5410 union
5411 {
5412 Elf64_Xword map;
5413 unsigned char buf[sizeof (Elf64_Xword)];
5414 }
5415 rld_map;
5416
5417 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5418 {
5419 if (linux_read_memory (dyn->d_un.d_val,
5420 rld_map.buf, sizeof (rld_map.buf)) == 0)
5421 return rld_map.map;
5422 else
5423 break;
5424 }
5425 #endif /* DT_MIPS_RLD_MAP */
5426
5427 if (dyn->d_tag == DT_DEBUG && map == -1)
5428 map = dyn->d_un.d_val;
5429
5430 if (dyn->d_tag == DT_NULL)
5431 break;
5432 }
5433 else
5434 {
5435 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5436 #ifdef DT_MIPS_RLD_MAP
5437 union
5438 {
5439 Elf32_Word map;
5440 unsigned char buf[sizeof (Elf32_Word)];
5441 }
5442 rld_map;
5443
5444 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5445 {
5446 if (linux_read_memory (dyn->d_un.d_val,
5447 rld_map.buf, sizeof (rld_map.buf)) == 0)
5448 return rld_map.map;
5449 else
5450 break;
5451 }
5452 #endif /* DT_MIPS_RLD_MAP */
5453
5454 if (dyn->d_tag == DT_DEBUG && map == -1)
5455 map = dyn->d_un.d_val;
5456
5457 if (dyn->d_tag == DT_NULL)
5458 break;
5459 }
5460
5461 dynamic_memaddr += dyn_size;
5462 }
5463
5464 return map;
5465 }
5466
5467 /* Read one pointer from MEMADDR in the inferior. */
5468
5469 static int
5470 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5471 {
5472 int ret;
5473
5474 /* Go through a union so this works on either big or little endian
5475 hosts, when the inferior's pointer size is smaller than the size
5476 of CORE_ADDR. It is assumed the inferior's endianness is the
5477 same as the superior's. */
5478 union
5479 {
5480 CORE_ADDR core_addr;
5481 unsigned int ui;
5482 unsigned char uc;
5483 } addr;
5484
5485 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5486 if (ret == 0)
5487 {
5488 if (ptr_size == sizeof (CORE_ADDR))
5489 *ptr = addr.core_addr;
5490 else if (ptr_size == sizeof (unsigned int))
5491 *ptr = addr.ui;
5492 else
5493 gdb_assert_not_reached ("unhandled pointer size");
5494 }
5495 return ret;
5496 }
5497
5498 struct link_map_offsets
5499 {
5500 /* Offset and size of r_debug.r_version. */
5501 int r_version_offset;
5502
5503 /* Offset and size of r_debug.r_map. */
5504 int r_map_offset;
5505
5506 /* Offset to l_addr field in struct link_map. */
5507 int l_addr_offset;
5508
5509 /* Offset to l_name field in struct link_map. */
5510 int l_name_offset;
5511
5512 /* Offset to l_ld field in struct link_map. */
5513 int l_ld_offset;
5514
5515 /* Offset to l_next field in struct link_map. */
5516 int l_next_offset;
5517
5518 /* Offset to l_prev field in struct link_map. */
5519 int l_prev_offset;
5520 };
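
/* These offsets mirror the SVR4/glibc <link.h> layout:

     struct r_debug  { int r_version; struct link_map *r_map; ... };
     struct link_map { ElfW(Addr) l_addr; char *l_name;
		       ElfW(Dyn) *l_ld;
		       struct link_map *l_next, *l_prev; };

   With 4-byte pointers this gives the 0/4 and 0/4/8/12/16 offsets
   used below; with 8-byte pointers (and padding after the int
   r_version) it gives 0/8 and 0/8/16/24/32.  */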
5521
5522 /* Construct qXfer:libraries-svr4:read reply. */
5523
5524 static int
5525 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5526 unsigned const char *writebuf,
5527 CORE_ADDR offset, int len)
5528 {
5529 char *document;
5530 unsigned document_len;
5531 struct process_info_private *const priv = current_process ()->private;
5532 char filename[PATH_MAX];
5533 int pid, is_elf64;
5534
5535 static const struct link_map_offsets lmo_32bit_offsets =
5536 {
5537 0, /* r_version offset. */
5538 4, /* r_debug.r_map offset. */
5539 0, /* l_addr offset in link_map. */
5540 4, /* l_name offset in link_map. */
5541 8, /* l_ld offset in link_map. */
5542 12, /* l_next offset in link_map. */
5543 16 /* l_prev offset in link_map. */
5544 };
5545
5546 static const struct link_map_offsets lmo_64bit_offsets =
5547 {
5548 0, /* r_version offset. */
5549 8, /* r_debug.r_map offset. */
5550 0, /* l_addr offset in link_map. */
5551 8, /* l_name offset in link_map. */
5552 16, /* l_ld offset in link_map. */
5553 24, /* l_next offset in link_map. */
5554 32 /* l_prev offset in link_map. */
5555 };
5556 const struct link_map_offsets *lmo;
5557 unsigned int machine;
5558 int ptr_size;
5559 CORE_ADDR lm_addr = 0, lm_prev = 0;
5560 int allocated = 1024;
5561 char *p;
5562 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5563 int header_done = 0;
5564
5565 if (writebuf != NULL)
5566 return -2;
5567 if (readbuf == NULL)
5568 return -1;
5569
5570 pid = lwpid_of (get_thread_lwp (current_inferior));
5571 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5572 is_elf64 = elf_64_file_p (filename, &machine);
5573 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5574 ptr_size = is_elf64 ? 8 : 4;
5575
5576 while (annex[0] != '\0')
5577 {
5578 const char *sep;
5579 CORE_ADDR *addrp;
5580 int len;
5581
5582 sep = strchr (annex, '=');
5583 if (sep == NULL)
5584 break;
5585
5586 len = sep - annex;
5587 if (len == 5 && strncmp (annex, "start", 5) == 0)
5588 addrp = &lm_addr;
5589 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5590 addrp = &lm_prev;
5591 else
5592 {
5593 annex = strchr (sep, ';');
5594 if (annex == NULL)
5595 break;
5596 annex++;
5597 continue;
5598 }
5599
5600 annex = decode_address_to_semicolon (addrp, sep + 1);
5601 }
5602
5603 if (lm_addr == 0)
5604 {
5605 int r_version = 0;
5606
5607 if (priv->r_debug == 0)
5608 priv->r_debug = get_r_debug (pid, is_elf64);
5609
5610 /* We failed to find DT_DEBUG. This situation will not change
5611 for this inferior - do not retry it. Report it to GDB as E01;
5612 see the GDB solib-svr4.c side for the reasons. */
5613 if (priv->r_debug == (CORE_ADDR) -1)
5614 return -1;
5615
5616 if (priv->r_debug != 0)
5617 {
5618 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5619 (unsigned char *) &r_version,
5620 sizeof (r_version)) != 0
5621 || r_version != 1)
5622 {
5623 warning ("unexpected r_debug version %d", r_version);
5624 }
5625 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5626 &lm_addr, ptr_size) != 0)
5627 {
5628 warning ("unable to read r_map from 0x%lx",
5629 (long) priv->r_debug + lmo->r_map_offset);
5630 }
5631 }
5632 }
5633
5634 document = xmalloc (allocated);
5635 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5636 p = document + strlen (document);
5637
5638 while (lm_addr
5639 && read_one_ptr (lm_addr + lmo->l_name_offset,
5640 &l_name, ptr_size) == 0
5641 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5642 &l_addr, ptr_size) == 0
5643 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5644 &l_ld, ptr_size) == 0
5645 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5646 &l_prev, ptr_size) == 0
5647 && read_one_ptr (lm_addr + lmo->l_next_offset,
5648 &l_next, ptr_size) == 0)
5649 {
5650 unsigned char libname[PATH_MAX];
5651
5652 if (lm_prev != l_prev)
5653 {
5654 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5655 (long) lm_prev, (long) l_prev);
5656 break;
5657 }
5658
5659 /* Ignore the first entry even if it has a valid name, as it
5660 corresponds to the main executable. The first entry should not be
5661 skipped if the dynamic loader was loaded late by a static executable
5662 (see the solib-svr4.c parameter ignore_first). But in such a case
5663 the main executable does not have PT_DYNAMIC present, and this
5664 function has already exited above due to a failed get_r_debug. */
5665 if (lm_prev == 0)
5666 {
5667 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5668 p = p + strlen (p);
5669 }
5670 else
5671 {
5672 /* Not checking for error because reading may stop before
5673 we've got PATH_MAX worth of characters. */
5674 libname[0] = '\0';
5675 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5676 libname[sizeof (libname) - 1] = '\0';
5677 if (libname[0] != '\0')
5678 {
5679 /* 6x the size for xml_escape_text below. */
5680 size_t len = 6 * strlen ((char *) libname);
5681 char *name;
5682
5683 if (!header_done)
5684 {
5685 /* Terminate `<library-list-svr4'. */
5686 *p++ = '>';
5687 header_done = 1;
5688 }
5689
5690 while (allocated < p - document + len + 200)
5691 {
5692 /* Expand to guarantee sufficient storage. */
5693 uintptr_t document_len = p - document;
5694
5695 document = xrealloc (document, 2 * allocated);
5696 allocated *= 2;
5697 p = document + document_len;
5698 }
5699
5700 name = xml_escape_text ((char *) libname);
5701 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5702 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5703 name, (unsigned long) lm_addr,
5704 (unsigned long) l_addr, (unsigned long) l_ld);
5705 free (name);
5706 }
5707 }
5708
5709 lm_prev = lm_addr;
5710 lm_addr = l_next;
5711 }
5712
5713 if (!header_done)
5714 {
5715 /* Empty list; terminate `<library-list-svr4'. */
5716 strcpy (p, "/>");
5717 }
5718 else
5719 strcpy (p, "</library-list-svr4>");
5720
5721 document_len = strlen (document);
5722 if (offset < document_len)
5723 document_len -= offset;
5724 else
5725 document_len = 0;
5726 if (len > document_len)
5727 len = document_len;
5728
5729 memcpy (readbuf, document + offset, len);
5730 xfree (document);
5731
5732 return len;
5733 }
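
/* A reply built by the loop above looks like this (illustrative
   addresses; the real document has no extra whitespace):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib/libc.so.6" lm="0x7ffff7ffe750"
	      l_addr="0x7ffff7a00000" l_ld="0x7ffff7dccba0"/>
     </library-list-svr4>
*/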
5734
5735 #ifdef HAVE_LINUX_BTRACE
5736
5737 /* See to_enable_btrace target method. */
5738
5739 static struct btrace_target_info *
5740 linux_low_enable_btrace (ptid_t ptid)
5741 {
5742 struct btrace_target_info *tinfo;
5743
5744 tinfo = linux_enable_btrace (ptid);
5745
5746 if (tinfo != NULL)
5747 {
5748 struct thread_info *thread = find_thread_ptid (ptid);
5749 struct regcache *regcache = get_thread_regcache (thread, 0);
5750
5751 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5752 }
5753
5754 return tinfo;
5755 }
5756
5757 /* See to_disable_btrace target method. */
5758
5759 static int
5760 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5761 {
5762 enum btrace_error err;
5763
5764 err = linux_disable_btrace (tinfo);
5765 return (err == BTRACE_ERR_NONE ? 0 : -1);
5766 }
5767
5768 /* See to_read_btrace target method. */
5769
5770 static int
5771 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5772 int type)
5773 {
5774 VEC (btrace_block_s) *btrace;
5775 struct btrace_block *block;
5776 enum btrace_error err;
5777 int i;
5778
5779 btrace = NULL;
5780 err = linux_read_btrace (&btrace, tinfo, type);
5781 if (err != BTRACE_ERR_NONE)
5782 {
5783 if (err == BTRACE_ERR_OVERFLOW)
5784 buffer_grow_str0 (buffer, "E.Overflow.");
5785 else
5786 buffer_grow_str0 (buffer, "E.Generic Error.");
5787
5788 return -1;
5789 }
5790
5791 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5792 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5793
5794 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5795 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5796 paddress (block->begin), paddress (block->end));
5797
5798 buffer_grow_str0 (buffer, "</btrace>\n");
5799
5800 VEC_free (btrace_block_s, btrace);
5801
5802 return 0;
5803 }
5804 #endif /* HAVE_LINUX_BTRACE */
5805
5806 static struct target_ops linux_target_ops = {
5807 linux_create_inferior,
5808 linux_attach,
5809 linux_kill,
5810 linux_detach,
5811 linux_mourn,
5812 linux_join,
5813 linux_thread_alive,
5814 linux_resume,
5815 linux_wait,
5816 linux_fetch_registers,
5817 linux_store_registers,
5818 linux_prepare_to_access_memory,
5819 linux_done_accessing_memory,
5820 linux_read_memory,
5821 linux_write_memory,
5822 linux_look_up_symbols,
5823 linux_request_interrupt,
5824 linux_read_auxv,
5825 linux_insert_point,
5826 linux_remove_point,
5827 linux_stopped_by_watchpoint,
5828 linux_stopped_data_address,
5829 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5830 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5831 && defined(PT_TEXT_END_ADDR)
5832 linux_read_offsets,
5833 #else
5834 NULL,
5835 #endif
5836 #ifdef USE_THREAD_DB
5837 thread_db_get_tls_address,
5838 #else
5839 NULL,
5840 #endif
5841 linux_qxfer_spu,
5842 hostio_last_error_from_errno,
5843 linux_qxfer_osdata,
5844 linux_xfer_siginfo,
5845 linux_supports_non_stop,
5846 linux_async,
5847 linux_start_non_stop,
5848 linux_supports_multi_process,
5849 #ifdef USE_THREAD_DB
5850 thread_db_handle_monitor_command,
5851 #else
5852 NULL,
5853 #endif
5854 linux_common_core_of_thread,
5855 linux_read_loadmap,
5856 linux_process_qsupported,
5857 linux_supports_tracepoints,
5858 linux_read_pc,
5859 linux_write_pc,
5860 linux_thread_stopped,
 5861   NULL,  /* get_tib_address: not implemented on GNU/Linux.  */
5862 linux_pause_all,
5863 linux_unpause_all,
5864 linux_cancel_breakpoints,
5865 linux_stabilize_threads,
5866 linux_install_fast_tracepoint_jump_pad,
5867 linux_emit_ops,
5868 linux_supports_disable_randomization,
5869 linux_get_min_fast_tracepoint_insn_len,
5870 linux_qxfer_libraries_svr4,
5871 linux_supports_agent,
5872 #ifdef HAVE_LINUX_BTRACE
5873 linux_supports_btrace,
5874 linux_low_enable_btrace,
5875 linux_low_disable_btrace,
5876 linux_low_read_btrace,
5877 #else
5878 NULL,
5879 NULL,
5880 NULL,
5881 NULL,
5882 #endif
5883 linux_supports_range_stepping,
5884 };
5885
5886 static void
 5887 linux_init_signals (void)
5888 {
5889 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5890 to find what the cancel signal actually is. */
5891 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
 5892   signal (__SIGRTMIN + 1, SIG_IGN);
5893 #endif
5894 }
5895
5896 #ifdef HAVE_LINUX_REGSETS
5897 void
5898 initialize_regsets_info (struct regsets_info *info)
5899 {
5900 for (info->num_regsets = 0;
5901 info->regsets[info->num_regsets].size >= 0;
5902 info->num_regsets++)
5903 ;
5904 }
5905 #endif
5906
5907 void
5908 initialize_low (void)
5909 {
5910 struct sigaction sigchld_action;
5911 memset (&sigchld_action, 0, sizeof (sigchld_action));
5912 set_target_ops (&linux_target_ops);
5913 set_breakpoint_data (the_low_target.breakpoint,
5914 the_low_target.breakpoint_len);
5915 linux_init_signals ();
5916 linux_ptrace_init_warnings ();
5917
5918 sigchld_action.sa_handler = sigchld_handler;
5919 sigemptyset (&sigchld_action.sa_mask);
5920 sigchld_action.sa_flags = SA_RESTART;
5921 sigaction (SIGCHLD, &sigchld_action, NULL);
5922
5923 initialize_low_arch ();
5924 }