gdb/gdbserver/linux-low.c (binutils-gdb.git, PR server/15604)
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <stdio.h>
28 #include <sys/ptrace.h>
29 #include "linux-ptrace.h"
30 #include "linux-procfs.h"
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <string.h>
35 #include <stdlib.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <ctype.h>
41 #include <pwd.h>
42 #include <sys/types.h>
43 #include <dirent.h>
44 #include "gdb_stat.h"
45 #include <sys/vfs.h>
46 #include <sys/uio.h>
47 #include "filestuff.h"
48 #ifndef ELFMAG0
49 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
50 then ELFMAG0 will have been defined. If it didn't get included by
51 gdb_proc_service.h then including it will likely introduce a duplicate
52 definition of elf_fpregset_t. */
53 #include <elf.h>
54 #endif
55
56 #ifndef SPUFS_MAGIC
57 #define SPUFS_MAGIC 0x23c9b64e
58 #endif
59
60 #ifdef HAVE_PERSONALITY
61 # include <sys/personality.h>
62 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
63 # define ADDR_NO_RANDOMIZE 0x0040000
64 # endif
65 #endif
66
67 #ifndef O_LARGEFILE
68 #define O_LARGEFILE 0
69 #endif
70
71 #ifndef W_STOPCODE
72 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
73 #endif
74
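/* A worked example of this status encoding (a sketch; SIGTRAP == 5 on
   Linux): W_STOPCODE (SIGTRAP) == (5 << 8) | 0x7f == 0x57f, which
   WIFSTOPPED and WSTOPSIG invert.  Extended ptrace events additionally
   use bits 16 and up (see handle_extended_wait below).  */
#if 0 /* Illustration only.  */
static void
example_status_layout (void)
{
  int wstat = W_STOPCODE (SIGTRAP);	/* (5 << 8) | 0x7f == 0x57f.  */

  gdb_assert (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP);
}
#endif
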
75 /* This is the kernel's hard limit. Not to be confused with
76 SIGRTMIN. */
77 #ifndef __SIGRTMIN
78 #define __SIGRTMIN 32
79 #endif
80
81 /* Some targets did not define these ptrace constants from the start,
82 so gdbserver defines them locally here. In the future, these may
83 be removed after they are added to asm/ptrace.h. */
84 #if !(defined(PT_TEXT_ADDR) \
85 || defined(PT_DATA_ADDR) \
86 || defined(PT_TEXT_END_ADDR))
87 #if defined(__mcoldfire__)
88 /* These are still undefined in 3.10 kernels. */
89 #define PT_TEXT_ADDR 49*4
90 #define PT_DATA_ADDR 50*4
91 #define PT_TEXT_END_ADDR 51*4
92 /* BFIN already defines these since at least 2.6.32 kernels. */
93 #elif defined(BFIN)
94 #define PT_TEXT_ADDR 220
95 #define PT_TEXT_END_ADDR 224
96 #define PT_DATA_ADDR 228
97 /* These are still undefined in 3.10 kernels. */
98 #elif defined(__TMS320C6X__)
99 #define PT_TEXT_ADDR (0x10000*4)
100 #define PT_DATA_ADDR (0x10004*4)
101 #define PT_TEXT_END_ADDR (0x10008*4)
102 #endif
103 #endif
104
105 #ifdef HAVE_LINUX_BTRACE
106 # include "linux-btrace.h"
107 #endif
108
109 #ifndef HAVE_ELF32_AUXV_T
110 /* Copied from glibc's elf.h. */
111 typedef struct
112 {
113 uint32_t a_type; /* Entry type */
114 union
115 {
116 uint32_t a_val; /* Integer value */
117 /* We used to have pointer elements added here. We cannot do that,
118 though, since it does not work when using 32-bit definitions
119 on 64-bit platforms and vice versa. */
120 } a_un;
121 } Elf32_auxv_t;
122 #endif
123
124 #ifndef HAVE_ELF64_AUXV_T
125 /* Copied from glibc's elf.h. */
126 typedef struct
127 {
128 uint64_t a_type; /* Entry type */
129 union
130 {
131 uint64_t a_val; /* Integer value */
132 /* We used to have pointer elements added here. We cannot do that,
133 though, since it does not work when using 32-bit definitions
134 on 64-bit platforms and vice versa. */
135 } a_un;
136 } Elf64_auxv_t;
137 #endif
138
139 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
140 representation of the thread ID.
141
142 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
143 the same as the LWP ID.
144
145 ``all_processes'' is keyed by the "overall process ID", which
146 GNU/Linux calls tgid, "thread group ID". */
147
148 struct inferior_list all_lwps;
149
150 /* A list of all unknown processes which receive stop signals. Some
151 other process will presumably claim each of these as forked
152 children momentarily. */
153
154 struct simple_pid_list
155 {
156 /* The process ID. */
157 int pid;
158
159 /* The status as reported by waitpid. */
160 int status;
161
162 /* Next in chain. */
163 struct simple_pid_list *next;
164 };
165 struct simple_pid_list *stopped_pids;
166
167 /* Trivial list manipulation functions to keep track of a list of new
168 stopped processes. */
169
170 static void
171 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
172 {
173 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
174
175 new_pid->pid = pid;
176 new_pid->status = status;
177 new_pid->next = *listp;
178 *listp = new_pid;
179 }
180
181 static int
182 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
183 {
184 struct simple_pid_list **p;
185
186 for (p = listp; *p != NULL; p = &(*p)->next)
187 if ((*p)->pid == pid)
188 {
189 struct simple_pid_list *next = (*p)->next;
190
191 *statusp = (*p)->status;
192 xfree (*p);
193 *p = next;
194 return 1;
195 }
196 return 0;
197 }
198
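/* A minimal usage sketch for the two helpers above; the PID and
   status are made up.  The waitpid layer parks a stop it cannot
   attribute to a known LWP yet, and the clone-event path later
   claims it.  */
#if 0 /* Illustration only.  */
static void
example_stopped_pids_usage (void)
{
  int status;

  /* A stop arrived for an LWP we don't know about yet; park it.  */
  add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));

  /* The PTRACE_EVENT_CLONE for 1234 arrives; claim the parked stop.  */
  if (pull_pid_from_list (&stopped_pids, 1234, &status))
    gdb_assert (WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
}
#endif
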
199 enum stopping_threads_kind
200 {
201 /* Not stopping threads presently. */
202 NOT_STOPPING_THREADS,
203
204 /* Stopping threads. */
205 STOPPING_THREADS,
206
207 /* Stopping and suspending threads. */
208 STOPPING_AND_SUSPENDING_THREADS
209 };
210
211 /* This is set while stop_all_lwps is in effect. */
212 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
213
214 /* FIXME make into a target method? */
215 int using_threads = 1;
216
217 /* True if we're presently stabilizing threads (moving them out of
218 jump pads). */
219 static int stabilizing_threads;
220
221 static void linux_resume_one_lwp (struct lwp_info *lwp,
222 int step, int signal, siginfo_t *info);
223 static void linux_resume (struct thread_resume *resume_info, size_t n);
224 static void stop_all_lwps (int suspend, struct lwp_info *except);
225 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
226 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
227 static struct lwp_info *add_lwp (ptid_t ptid);
228 static int linux_stopped_by_watchpoint (void);
229 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
230 static void proceed_all_lwps (void);
231 static int finish_step_over (struct lwp_info *lwp);
232 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
233 static int kill_lwp (unsigned long lwpid, int signo);
234
235 /* True if the low target can hardware single-step. Such targets
236 don't need a BREAKPOINT_REINSERT_ADDR callback. */
237
238 static int
239 can_hardware_single_step (void)
240 {
241 return (the_low_target.breakpoint_reinsert_addr == NULL);
242 }
243
244 /* True if the low target supports memory breakpoints. If so, we'll
245 have a GET_PC implementation. */
246
247 static int
248 supports_breakpoints (void)
249 {
250 return (the_low_target.get_pc != NULL);
251 }
252
253 /* Returns true if this target can support fast tracepoints. This
254 does not mean that the in-process agent has been loaded in the
255 inferior. */
256
257 static int
258 supports_fast_tracepoints (void)
259 {
260 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
261 }
262
263 /* True if LWP is stopped in its stepping range. */
264
265 static int
266 lwp_in_step_range (struct lwp_info *lwp)
267 {
268 CORE_ADDR pc = lwp->stop_pc;
269
270 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
271 }
272
273 struct pending_signals
274 {
275 int signal;
276 siginfo_t info;
277 struct pending_signals *prev;
278 };
279
280 /* The read/write ends of the pipe registered as a waitable file in the
281 event loop. */
282 static int linux_event_pipe[2] = { -1, -1 };
283
284 /* True if we're currently in async mode. */
285 #define target_is_async_p() (linux_event_pipe[0] != -1)
286
287 static void send_sigstop (struct lwp_info *lwp);
288 static void wait_for_sigstop (struct inferior_list_entry *entry);
289
290 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit, and -1 if it is not an ELF file at all; set *MACHINE to the e_machine field, or to EM_NONE. */
291
292 static int
293 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
294 {
295 if (header->e_ident[EI_MAG0] == ELFMAG0
296 && header->e_ident[EI_MAG1] == ELFMAG1
297 && header->e_ident[EI_MAG2] == ELFMAG2
298 && header->e_ident[EI_MAG3] == ELFMAG3)
299 {
300 *machine = header->e_machine;
301 return header->e_ident[EI_CLASS] == ELFCLASS64;
302
303 }
304 *machine = EM_NONE;
305 return -1;
306 }
307
308 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
309 file or cannot be fully read, and -1 if it cannot be opened or is
310 not an ELF file at all. */
311
312 static int
313 elf_64_file_p (const char *file, unsigned int *machine)
314 {
315 Elf64_Ehdr header;
316 int fd;
317
318 fd = open (file, O_RDONLY);
319 if (fd < 0)
320 return -1;
321
322 if (read (fd, &header, sizeof (header)) != sizeof (header))
323 {
324 close (fd);
325 return 0;
326 }
327 close (fd);
328
329 return elf_64_header_p (&header, machine);
330 }
331
332 /* Accept an integer PID; return true if the executable that PID
333 is running is a 64-bit ELF file. */
334
335 int
336 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
337 {
338 char file[PATH_MAX];
339
340 sprintf (file, "/proc/%d/exe", pid);
341 return elf_64_file_p (file, machine);
342 }
343
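/* A sketch of a hypothetical caller of the above; the reporting is
   invented, only the return-value contract matters.  */
#if 0 /* Illustration only.  */
static void
example_report_exe_class (int pid)
{
  unsigned int machine;
  int res = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (res > 0)
    fprintf (stderr, "pid %d: 64-bit ELF, e_machine %u\n", pid, machine);
  else if (res == 0)
    fprintf (stderr, "pid %d: 32-bit ELF\n", pid);
  else
    fprintf (stderr, "pid %d: unreadable or not ELF\n", pid);
}
#endif
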
344 static void
345 delete_lwp (struct lwp_info *lwp)
346 {
347 remove_thread (get_lwp_thread (lwp));
348 remove_inferior (&all_lwps, &lwp->head);
349 free (lwp->arch_private);
350 free (lwp);
351 }
352
353 /* Add a process to the common process list, and set its private
354 data. */
355
356 static struct process_info *
357 linux_add_process (int pid, int attached)
358 {
359 struct process_info *proc;
360
361 proc = add_process (pid, attached);
362 proc->private = xcalloc (1, sizeof (*proc->private));
363
364 /* Set the arch when the first LWP stops. */
365 proc->private->new_inferior = 1;
366
367 if (the_low_target.new_process != NULL)
368 proc->private->arch_private = the_low_target.new_process ();
369
370 return proc;
371 }
372
373 /* Handle a GNU/Linux extended wait response. If we see a clone
374 event, we need to add the new LWP to our list (and not report the
375 trap to higher layers). */
376
377 static void
378 handle_extended_wait (struct lwp_info *event_child, int wstat)
379 {
380 int event = wstat >> 16;
381 struct lwp_info *new_lwp;
382
383 if (event == PTRACE_EVENT_CLONE)
384 {
385 ptid_t ptid;
386 unsigned long new_pid;
387 int ret, status;
388
389 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
390 &new_pid);
391
392 /* If we haven't already seen the new PID stop, wait for it now. */
393 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
394 {
395 /* The new child has a pending SIGSTOP. We can't affect it until it
396 hits the SIGSTOP, but we're already attached. */
397
398 ret = my_waitpid (new_pid, &status, __WALL);
399
400 if (ret == -1)
401 perror_with_name ("waiting for new child");
402 else if (ret != new_pid)
403 warning ("wait returned unexpected PID %d", ret);
404 else if (!WIFSTOPPED (status))
405 warning ("wait returned unexpected status 0x%x", status);
406 }
407
408 ptid = ptid_build (pid_of (event_child), new_pid, 0);
409 new_lwp = add_lwp (ptid);
410 add_thread (ptid, new_lwp);
411
412 /* Either we're going to immediately resume the new thread
413 or leave it stopped. linux_resume_one_lwp is a nop if it
414 thinks the thread is currently running, so set this first
415 before calling linux_resume_one_lwp. */
416 new_lwp->stopped = 1;
417
418 /* If we're suspending all threads, leave this one suspended
419 too. */
420 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
421 new_lwp->suspended = 1;
422
423 /* Normally we will get the pending SIGSTOP. But in some cases
424 we might get another signal delivered to the group first.
425 If we do get another signal, be sure not to lose it. */
426 if (WSTOPSIG (status) == SIGSTOP)
427 {
428 if (stopping_threads != NOT_STOPPING_THREADS)
429 new_lwp->stop_pc = get_stop_pc (new_lwp);
430 else
431 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
432 }
433 else
434 {
435 new_lwp->stop_expected = 1;
436
437 if (stopping_threads != NOT_STOPPING_THREADS)
438 {
439 new_lwp->stop_pc = get_stop_pc (new_lwp);
440 new_lwp->status_pending_p = 1;
441 new_lwp->status_pending = status;
442 }
443 else
444 /* Pass the signal on. This is what GDB does - except
445 shouldn't we really report it instead? */
446 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
447 }
448
449 /* Always resume the current thread. If we are stopping
450 threads, it will have a pending SIGSTOP; we may as well
451 collect it now. */
452 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
453 }
454 }
455
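/* A sketch of the status shape tested above, as produced by the
   kernel when PTRACE_O_TRACECLONE is in effect: a SIGTRAP stop with
   the event code stored above bit 16.  */
#if 0 /* Illustration only.  */
static int
example_is_clone_event (int wstat)
{
  return (WIFSTOPPED (wstat)
	  && WSTOPSIG (wstat) == SIGTRAP
	  && (wstat >> 16) == PTRACE_EVENT_CLONE);
}
#endif
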
456 /* Return the PC as read from the regcache of LWP, without any
457 adjustment. */
458
459 static CORE_ADDR
460 get_pc (struct lwp_info *lwp)
461 {
462 struct thread_info *saved_inferior;
463 struct regcache *regcache;
464 CORE_ADDR pc;
465
466 if (the_low_target.get_pc == NULL)
467 return 0;
468
469 saved_inferior = current_inferior;
470 current_inferior = get_lwp_thread (lwp);
471
472 regcache = get_thread_regcache (current_inferior, 1);
473 pc = (*the_low_target.get_pc) (regcache);
474
475 if (debug_threads)
476 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
477
478 current_inferior = saved_inferior;
479 return pc;
480 }
481
482 /* This function should only be called if LWP got a SIGTRAP.
483 The SIGTRAP could mean several things.
484
485 On i386, where decr_pc_after_break is non-zero:
486 If we were single-stepping this process using PTRACE_SINGLESTEP,
487 we will get only the one SIGTRAP (even if the instruction we
488 stepped over was a breakpoint). The value of $eip will be the
489 next instruction.
490 If we continue the process using PTRACE_CONT, we will get a
491 SIGTRAP when we hit a breakpoint. The value of $eip will be
492 the instruction after the breakpoint (i.e. needs to be
493 decremented). If we report the SIGTRAP to GDB, we must also
494 report the undecremented PC. If we cancel the SIGTRAP, we
495 must resume at the decremented PC.
496
497 (Presumably, not yet tested) On a non-decr_pc_after_break machine
498 with hardware or kernel single-step:
499 If we single-step over a breakpoint instruction, our PC will
500 point at the following instruction. If we continue and hit a
501 breakpoint instruction, our PC will point at the breakpoint
502 instruction. */
503
504 static CORE_ADDR
505 get_stop_pc (struct lwp_info *lwp)
506 {
507 CORE_ADDR stop_pc;
508
509 if (the_low_target.get_pc == NULL)
510 return 0;
511
512 stop_pc = get_pc (lwp);
513
514 if (WSTOPSIG (lwp->last_status) == SIGTRAP
515 && !lwp->stepping
516 && !lwp->stopped_by_watchpoint
517 && lwp->last_status >> 16 == 0)
518 stop_pc -= the_low_target.decr_pc_after_break;
519
520 if (debug_threads)
521 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
522
523 return stop_pc;
524 }
525
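/* A worked example with made-up numbers: on i386 decr_pc_after_break
   is 1, so if a breakpoint's int3 byte sits at 0x8048000 the kernel
   reports $eip == 0x8048001 after the trap, and get_stop_pc hands
   back 0x8048000, the address GDB knows the breakpoint by.  */
#if 0 /* Illustration only.  */
static CORE_ADDR
example_decr_pc (void)
{
  CORE_ADDR reported_pc = 0x8048001;	/* PC read from the regcache.  */

  return reported_pc - 1;		/* What get_stop_pc computes.  */
}
#endif
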
526 static struct lwp_info *
527 add_lwp (ptid_t ptid)
528 {
529 struct lwp_info *lwp;
530
531 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
532 memset (lwp, 0, sizeof (*lwp));
533
534 lwp->head.id = ptid;
535
536 if (the_low_target.new_thread != NULL)
537 lwp->arch_private = the_low_target.new_thread ();
538
539 add_inferior_to_list (&all_lwps, &lwp->head);
540
541 return lwp;
542 }
543
544 /* Start an inferior process and return its pid.
545 ALLARGS is a vector of program-name and args. */
546
547 static int
548 linux_create_inferior (char *program, char **allargs)
549 {
550 #ifdef HAVE_PERSONALITY
551 int personality_orig = 0, personality_set = 0;
552 #endif
553 struct lwp_info *new_lwp;
554 int pid;
555 ptid_t ptid;
556
557 #ifdef HAVE_PERSONALITY
558 if (disable_randomization)
559 {
560 errno = 0;
561 personality_orig = personality (0xffffffff);
562 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
563 {
564 personality_set = 1;
565 personality (personality_orig | ADDR_NO_RANDOMIZE);
566 }
567 if (errno != 0 || (personality_set
568 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
569 warning ("Error disabling address space randomization: %s",
570 strerror (errno));
571 }
572 #endif
573
574 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
575 pid = vfork ();
576 #else
577 pid = fork ();
578 #endif
579 if (pid < 0)
580 perror_with_name ("fork");
581
582 if (pid == 0)
583 {
584 close_most_fds ();
585 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
586
587 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
588 signal (__SIGRTMIN + 1, SIG_DFL);
589 #endif
590
591 setpgid (0, 0);
592
593 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
594 stdout to stderr so that inferior i/o doesn't corrupt the connection.
595 Also, redirect stdin to /dev/null. */
596 if (remote_connection_is_stdio ())
597 {
598 close (0);
599 open ("/dev/null", O_RDONLY);
600 dup2 (2, 1);
601 if (write (2, "stdin/stdout redirected\n",
602 sizeof ("stdin/stdout redirected\n") - 1) < 0)
603 {
604 /* Errors ignored. */;
605 }
606 }
607
608 execv (program, allargs);
609 if (errno == ENOENT)
610 execvp (program, allargs);
611
612 fprintf (stderr, "Cannot exec %s: %s.\n", program,
613 strerror (errno));
614 fflush (stderr);
615 _exit (0177);
616 }
617
618 #ifdef HAVE_PERSONALITY
619 if (personality_set)
620 {
621 errno = 0;
622 personality (personality_orig);
623 if (errno != 0)
624 warning ("Error restoring address space randomization: %s",
625 strerror (errno));
626 }
627 #endif
628
629 linux_add_process (pid, 0);
630
631 ptid = ptid_build (pid, pid, 0);
632 new_lwp = add_lwp (ptid);
633 add_thread (ptid, new_lwp);
634 new_lwp->must_set_ptrace_flags = 1;
635
636 return pid;
637 }
638
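/* The ASLR handling above, condensed into a sketch (assumes
   HAVE_PERSONALITY; error checks dropped): query the current
   persona, set ADDR_NO_RANDOMIZE around the fork/exec, then restore
   it in the server.  */
#if 0 /* Illustration only.  */
static void
example_disable_aslr (void)
{
  int orig = personality (0xffffffff);	/* Query only.  */

  personality (orig | ADDR_NO_RANDOMIZE);
  /* ... fork and exec the inferior here ...  */
  personality (orig);			/* Restore in the server.  */
}
#endif
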
639 /* Attach to an inferior process. */
640
641 static void
642 linux_attach_lwp_1 (unsigned long lwpid, int initial)
643 {
644 ptid_t ptid;
645 struct lwp_info *new_lwp;
646
647 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
648 != 0)
649 {
650 struct buffer buffer;
651
652 if (!initial)
653 {
654 /* If we fail to attach to an LWP, just warn. */
655 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
656 strerror (errno), errno);
657 fflush (stderr);
658 return;
659 }
660
661 /* If we fail to attach to a process, report an error. */
662 buffer_init (&buffer);
663 linux_ptrace_attach_warnings (lwpid, &buffer);
664 buffer_grow_str0 (&buffer, "");
665 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
666 lwpid, strerror (errno), errno);
667 }
668
669 if (initial)
670 /* If lwp is the tgid, we handle adding existing threads later.
671 Otherwise we just add lwp without bothering about any other
672 threads. */
673 ptid = ptid_build (lwpid, lwpid, 0);
674 else
675 {
676 /* Note that extracting the pid from the current inferior is
677 safe, since we're always called in the context of the same
678 process as this new thread. */
679 int pid = pid_of (get_thread_lwp (current_inferior));
680 ptid = ptid_build (pid, lwpid, 0);
681 }
682
683 new_lwp = add_lwp (ptid);
684 add_thread (ptid, new_lwp);
685
686 /* We need to wait for SIGSTOP before being able to make the next
687 ptrace call on this LWP. */
688 new_lwp->must_set_ptrace_flags = 1;
689
690 if (linux_proc_pid_is_stopped (lwpid))
691 {
692 if (debug_threads)
693 fprintf (stderr,
694 "Attached to a stopped process\n");
695
696 /* The process is definitely stopped. It is in a job control
697 stop, unless the kernel predates the TASK_STOPPED /
698 TASK_TRACED distinction, in which case it might be in a
699 ptrace stop. Make sure it is in a ptrace stop; from there we
700 can kill it, signal it, et cetera.
701
702 First make sure there is a pending SIGSTOP. Since we are
703 already attached, the process can not transition from stopped
704 to running without a PTRACE_CONT; so we know this signal will
705 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
706 probably already in the queue (unless this kernel is old
707 enough to use TASK_STOPPED for ptrace stops); but since
708 SIGSTOP is not an RT signal, it can only be queued once. */
709 kill_lwp (lwpid, SIGSTOP);
710
711 /* Finally, resume the stopped process. This will deliver the
712 SIGSTOP (or a higher priority signal, just like normal
713 PTRACE_ATTACH), which we'll catch later on. */
714 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
715 }
716
717 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
718 brings it to a halt.
719
720 There are several cases to consider here:
721
722 1) gdbserver has already attached to the process and is being notified
723 of a new thread that is being created.
724 In this case we should ignore that SIGSTOP and resume the
725 process. This is handled below by setting stop_expected = 1,
726 and the fact that add_thread sets last_resume_kind ==
727 resume_continue.
728
729 2) This is the first thread (the process thread), and we're attaching
730 to it via attach_inferior.
731 In this case we want the process thread to stop.
732 This is handled by having linux_attach set last_resume_kind ==
733 resume_stop after we return.
734
735 If the pid we are attaching to is also the tgid, we attach to and
736 stop all the existing threads. Otherwise, we attach to pid and
737 ignore any other threads in the same group as this pid.
738
739 3) GDB is connecting to gdbserver and is requesting an enumeration of all
740 existing threads.
741 In this case we want the thread to stop.
742 FIXME: This case is currently not properly handled.
743 We should wait for the SIGSTOP but don't. Things work apparently
744 because enough time passes between when we ptrace (ATTACH) and when
745 gdb makes the next ptrace call on the thread.
746
747 On the other hand, if we are currently trying to stop all threads, we
748 should treat the new thread as if we had sent it a SIGSTOP. This works
749 because we are guaranteed that the add_lwp call above added us to the
750 end of the list, and so the new thread has not yet reached
751 wait_for_sigstop (but will). */
752 new_lwp->stop_expected = 1;
753 }
754
755 void
756 linux_attach_lwp (unsigned long lwpid)
757 {
758 linux_attach_lwp_1 (lwpid, 0);
759 }
760
761 /* Attach to PID. If PID is the tgid, attach to it and all
762 of its threads. */
763
764 static int
765 linux_attach (unsigned long pid)
766 {
767 /* Attach to PID. We will check for other threads
768 soon. */
769 linux_attach_lwp_1 (pid, 1);
770 linux_add_process (pid, 1);
771
772 if (!non_stop)
773 {
774 struct thread_info *thread;
775
776 /* Don't ignore the initial SIGSTOP if we just attached to this
777 process. It will be collected by wait shortly. */
778 thread = find_thread_ptid (ptid_build (pid, pid, 0));
779 thread->last_resume_kind = resume_stop;
780 }
781
782 if (linux_proc_get_tgid (pid) == pid)
783 {
784 DIR *dir;
785 char pathname[128];
786
787 sprintf (pathname, "/proc/%ld/task", pid);
788
789 dir = opendir (pathname);
790
791 if (!dir)
792 {
793 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
794 fflush (stderr);
795 }
796 else
797 {
798 /* At this point we attached to the tgid. Scan the task for
799 existing threads. */
800 unsigned long lwp;
801 int new_threads_found;
802 int iterations = 0;
803 struct dirent *dp;
804
805 while (iterations < 2)
806 {
807 new_threads_found = 0;
808 /* Add all the other threads. While we go through the
809 threads, new threads may be spawned. Cycle through
810 the list of threads until we have done two iterations without
811 finding new threads. */
812 while ((dp = readdir (dir)) != NULL)
813 {
814 /* Fetch one lwp. */
815 lwp = strtoul (dp->d_name, NULL, 10);
816
817 /* Is this a new thread? */
818 if (lwp
819 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
820 {
821 linux_attach_lwp_1 (lwp, 0);
822 new_threads_found++;
823
824 if (debug_threads)
825 fprintf (stderr, "\
826 Found and attached to new lwp %ld\n", lwp);
827 }
828 }
829
830 if (!new_threads_found)
831 iterations++;
832 else
833 iterations = 0;
834
835 rewinddir (dir);
836 }
837 closedir (dir);
838 }
839 }
840
841 return 0;
842 }
843
844 struct counter
845 {
846 int pid;
847 int count;
848 };
849
850 static int
851 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
852 {
853 struct counter *counter = args;
854
855 if (ptid_get_pid (entry->id) == counter->pid)
856 {
857 if (++counter->count > 1)
858 return 1;
859 }
860
861 return 0;
862 }
863
864 static int
865 last_thread_of_process_p (struct thread_info *thread)
866 {
867 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
868 int pid = ptid_get_pid (ptid);
869 struct counter counter = { pid , 0 };
870
871 return (find_inferior (&all_threads,
872 second_thread_of_pid_p, &counter) == NULL);
873 }
874
875 /* Kill LWP. */
876
877 static void
878 linux_kill_one_lwp (struct lwp_info *lwp)
879 {
880 int pid = lwpid_of (lwp);
881
882 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
883 there is no signal context, and ptrace(PTRACE_KILL) (or
884 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
885 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
886 alternative is to kill with SIGKILL. We only need one SIGKILL
887 per process, not one for each thread. But since we still support
888 linuxthreads, and we also support debugging programs using raw
889 clone without CLONE_THREAD, we send one for each thread. For
890 years, we used PTRACE_KILL only, so we're being a bit paranoid
891 about some old kernels where PTRACE_KILL might work better
892 (dubious if there are any such, but that's why it's paranoia), so
893 we try SIGKILL first, PTRACE_KILL second, and so we're fine
894 everywhere. */
895
896 errno = 0;
897 kill (pid, SIGKILL);
898 if (debug_threads)
899 fprintf (stderr,
900 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
901 target_pid_to_str (ptid_of (lwp)),
902 errno ? strerror (errno) : "OK");
903
904 errno = 0;
905 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
906 if (debug_threads)
907 fprintf (stderr,
908 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
909 target_pid_to_str (ptid_of (lwp)),
910 errno ? strerror (errno) : "OK");
911 }
912
913 /* Callback for `find_inferior'. Kills an lwp of a given process,
914 except the leader. */
915
916 static int
917 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
918 {
919 struct thread_info *thread = (struct thread_info *) entry;
920 struct lwp_info *lwp = get_thread_lwp (thread);
921 int wstat;
922 int pid = * (int *) args;
923
924 if (ptid_get_pid (entry->id) != pid)
925 return 0;
926
927 /* We avoid killing the first thread here, because of a Linux kernel (at
928 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
929 the children get a chance to be reaped, it will remain a zombie
930 forever. */
931
932 if (lwpid_of (lwp) == pid)
933 {
934 if (debug_threads)
935 fprintf (stderr, "lkop: is last of process %s\n",
936 target_pid_to_str (entry->id));
937 return 0;
938 }
939
940 do
941 {
942 linux_kill_one_lwp (lwp);
943
944 /* Make sure it died. The loop is most likely unnecessary. */
945 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
946 } while (pid > 0 && WIFSTOPPED (wstat));
947
948 return 0;
949 }
950
951 static int
952 linux_kill (int pid)
953 {
954 struct process_info *process;
955 struct lwp_info *lwp;
956 int wstat;
957 int lwpid;
958
959 process = find_process_pid (pid);
960 if (process == NULL)
961 return -1;
962
963 /* If we're killing a running inferior, make sure it is stopped
964 first, as PTRACE_KILL will not work otherwise. */
965 stop_all_lwps (0, NULL);
966
967 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
968
969 /* See the comment in linux_kill_one_lwp. We did not kill the first
970 thread in the list, so do so now. */
971 lwp = find_lwp_pid (pid_to_ptid (pid));
972
973 if (lwp == NULL)
974 {
975 if (debug_threads)
976 fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
977 pid);
978 }
979 else
980 {
981 if (debug_threads)
982 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
983 lwpid_of (lwp), pid);
984
985 do
986 {
987 linux_kill_one_lwp (lwp);
988
989 /* Make sure it died. The loop is most likely unnecessary. */
990 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
991 } while (lwpid > 0 && WIFSTOPPED (wstat));
992 }
993
994 the_target->mourn (process);
995
996 /* Since we presently can only stop all lwps of all processes, we
997 need to unstop lwps of other processes. */
998 unstop_all_lwps (0, NULL);
999 return 0;
1000 }
1001
1002 /* Get pending signal of THREAD, for detaching purposes. This is the
1003 signal the thread last stopped for, which we need to deliver to the
1004 thread when detaching; otherwise it'd be suppressed/lost. */
1005
1006 static int
1007 get_detach_signal (struct thread_info *thread)
1008 {
1009 enum gdb_signal signo = GDB_SIGNAL_0;
1010 int status;
1011 struct lwp_info *lp = get_thread_lwp (thread);
1012
1013 if (lp->status_pending_p)
1014 status = lp->status_pending;
1015 else
1016 {
1017 /* If the thread had been suspended by gdbserver, and it stopped
1018 cleanly, then it'll have stopped with SIGSTOP. But we don't
1019 want to deliver that SIGSTOP. */
1020 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1021 || thread->last_status.value.sig == GDB_SIGNAL_0)
1022 return 0;
1023
1024 /* Otherwise, we may need to deliver the signal we
1025 intercepted. */
1026 status = lp->last_status;
1027 }
1028
1029 if (!WIFSTOPPED (status))
1030 {
1031 if (debug_threads)
1032 fprintf (stderr,
1033 "GPS: lwp %s hasn't stopped: no pending signal\n",
1034 target_pid_to_str (ptid_of (lp)));
1035 return 0;
1036 }
1037
1038 /* Extended wait statuses aren't real SIGTRAPs. */
1039 if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1040 {
1041 if (debug_threads)
1042 fprintf (stderr,
1043 "GPS: lwp %s had stopped with extended "
1044 "status: no pending signal\n",
1045 target_pid_to_str (ptid_of (lp)));
1046 return 0;
1047 }
1048
1049 signo = gdb_signal_from_host (WSTOPSIG (status));
1050
1051 if (program_signals_p && !program_signals[signo])
1052 {
1053 if (debug_threads)
1054 fprintf (stderr,
1055 "GPS: lwp %s had signal %s, but it is in nopass state\n",
1056 target_pid_to_str (ptid_of (lp)),
1057 gdb_signal_to_string (signo));
1058 return 0;
1059 }
1060 else if (!program_signals_p
1061 /* If we have no way to know which signals GDB does not
1062 want to have passed to the program, assume
1063 SIGTRAP/SIGINT, which is GDB's default. */
1064 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1065 {
1066 if (debug_threads)
1067 fprintf (stderr,
1068 "GPS: lwp %s had signal %s, "
1069 "but we don't know if we should pass it. Default to not.\n",
1070 target_pid_to_str (ptid_of (lp)),
1071 gdb_signal_to_string (signo));
1072 return 0;
1073 }
1074 else
1075 {
1076 if (debug_threads)
1077 fprintf (stderr,
1078 "GPS: lwp %s has pending signal %s: delivering it.\n",
1079 target_pid_to_str (ptid_of (lp)),
1080 gdb_signal_to_string (signo));
1081
1082 return WSTOPSIG (status);
1083 }
1084 }
1085
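/* A sketch of the rule get_detach_signal implements, with a made-up
   stop status: a non-SIGSTOP stop is re-delivered on detach unless
   GDB has that signal in "nopass" state.  */
#if 0 /* Illustration only.  */
static int
example_detach_signal (void)
{
  int status = W_STOPCODE (SIGINT);	/* Made-up last stop.  */
  enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

  /* program_signals[signo] records whether GDB wants the signal
     delivered; if not, detach with signal 0 instead.  */
  return (program_signals_p && !program_signals[signo])
    ? 0 : WSTOPSIG (status);
}
#endif
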
1086 static int
1087 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1088 {
1089 struct thread_info *thread = (struct thread_info *) entry;
1090 struct lwp_info *lwp = get_thread_lwp (thread);
1091 int pid = * (int *) args;
1092 int sig;
1093
1094 if (ptid_get_pid (entry->id) != pid)
1095 return 0;
1096
1097 /* If there is a pending SIGSTOP, get rid of it. */
1098 if (lwp->stop_expected)
1099 {
1100 if (debug_threads)
1101 fprintf (stderr,
1102 "Sending SIGCONT to %s\n",
1103 target_pid_to_str (ptid_of (lwp)));
1104
1105 kill_lwp (lwpid_of (lwp), SIGCONT);
1106 lwp->stop_expected = 0;
1107 }
1108
1109 /* Flush any pending changes to the process's registers. */
1110 regcache_invalidate_thread (get_lwp_thread (lwp));
1111
1112 /* Pass on any pending signal for this thread. */
1113 sig = get_detach_signal (thread);
1114
1115 /* Finally, let it resume. */
1116 if (the_low_target.prepare_to_resume != NULL)
1117 the_low_target.prepare_to_resume (lwp);
1118 if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1119 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1120 error (_("Can't detach %s: %s"),
1121 target_pid_to_str (ptid_of (lwp)),
1122 strerror (errno));
1123
1124 delete_lwp (lwp);
1125 return 0;
1126 }
1127
1128 static int
1129 linux_detach (int pid)
1130 {
1131 struct process_info *process;
1132
1133 process = find_process_pid (pid);
1134 if (process == NULL)
1135 return -1;
1136
1137 /* Stop all threads before detaching. First, ptrace requires that
1138 the thread is stopped to successfully detach. Second, thread_db
1139 may need to uninstall thread event breakpoints from memory, which
1140 only works with a stopped process anyway. */
1141 stop_all_lwps (0, NULL);
1142
1143 #ifdef USE_THREAD_DB
1144 thread_db_detach (process);
1145 #endif
1146
1147 /* Stabilize threads (move out of jump pads). */
1148 stabilize_threads ();
1149
1150 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1151
1152 the_target->mourn (process);
1153
1154 /* Since we presently can only stop all lwps of all processes, we
1155 need to unstop lwps of other processes. */
1156 unstop_all_lwps (0, NULL);
1157 return 0;
1158 }
1159
1160 /* Remove all LWPs that belong to process PROC from the lwp list. */
1161
1162 static int
1163 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1164 {
1165 struct lwp_info *lwp = (struct lwp_info *) entry;
1166 struct process_info *process = proc;
1167
1168 if (pid_of (lwp) == pid_of (process))
1169 delete_lwp (lwp);
1170
1171 return 0;
1172 }
1173
1174 static void
1175 linux_mourn (struct process_info *process)
1176 {
1177 struct process_info_private *priv;
1178
1179 #ifdef USE_THREAD_DB
1180 thread_db_mourn (process);
1181 #endif
1182
1183 find_inferior (&all_lwps, delete_lwp_callback, process);
1184
1185 /* Free all private data. */
1186 priv = process->private;
1187 free (priv->arch_private);
1188 free (priv);
1189 process->private = NULL;
1190
1191 remove_process (process);
1192 }
1193
1194 static void
1195 linux_join (int pid)
1196 {
1197 int status, ret;
1198
1199 do {
1200 ret = my_waitpid (pid, &status, 0);
1201 if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
1202 break;
1203 } while (ret != -1 || errno != ECHILD);
1204 }
1205
1206 /* Return nonzero if the given thread is still alive. */
1207 static int
1208 linux_thread_alive (ptid_t ptid)
1209 {
1210 struct lwp_info *lwp = find_lwp_pid (ptid);
1211
1212 /* We assume we always know if a thread exits. If a whole process
1213 exited but we still haven't been able to report it to GDB, we'll
1214 hold on to the last lwp of the dead process. */
1215 if (lwp != NULL)
1216 return !lwp->dead;
1217 else
1218 return 0;
1219 }
1220
1221 /* Return 1 if this lwp has an interesting status pending. */
1222 static int
1223 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1224 {
1225 struct lwp_info *lwp = (struct lwp_info *) entry;
1226 ptid_t ptid = * (ptid_t *) arg;
1227 struct thread_info *thread;
1228
1229 /* Check if we're only interested in events from a specific process
1230 or its lwps. */
1231 if (!ptid_equal (minus_one_ptid, ptid)
1232 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1233 return 0;
1234
1235 thread = get_lwp_thread (lwp);
1236
1237 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1238 report any status pending the LWP may have. */
1239 if (thread->last_resume_kind == resume_stop
1240 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1241 return 0;
1242
1243 return lwp->status_pending_p;
1244 }
1245
1246 static int
1247 same_lwp (struct inferior_list_entry *entry, void *data)
1248 {
1249 ptid_t ptid = *(ptid_t *) data;
1250 int lwp;
1251
1252 if (ptid_get_lwp (ptid) != 0)
1253 lwp = ptid_get_lwp (ptid);
1254 else
1255 lwp = ptid_get_pid (ptid);
1256
1257 if (ptid_get_lwp (entry->id) == lwp)
1258 return 1;
1259
1260 return 0;
1261 }
1262
1263 struct lwp_info *
1264 find_lwp_pid (ptid_t ptid)
1265 {
1266 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1267 }
1268
1269 static struct lwp_info *
1270 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1271 {
1272 int ret;
1273 int to_wait_for = -1;
1274 struct lwp_info *child = NULL;
1275
1276 if (debug_threads)
1277 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1278
1279 if (ptid_equal (ptid, minus_one_ptid))
1280 to_wait_for = -1; /* any child */
1281 else
1282 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1283
1284 options |= __WALL;
1285
1286 retry:
1287
1288 ret = my_waitpid (to_wait_for, wstatp, options);
1289 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1290 return NULL;
1291 else if (ret == -1)
1292 perror_with_name ("waitpid");
1293
1294 if (debug_threads
1295 && (!WIFSTOPPED (*wstatp)
1296 || (WSTOPSIG (*wstatp) != 32
1297 && WSTOPSIG (*wstatp) != 33)))
1298 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1299
1300 child = find_lwp_pid (pid_to_ptid (ret));
1301
1302 /* If we didn't find a process, one of two things presumably happened:
1303 - A process we started and then detached from has exited. Ignore it.
1304 - A process we are controlling has forked and the new child's stop
1305 was reported to us by the kernel. Save its PID. */
1306 if (child == NULL && WIFSTOPPED (*wstatp))
1307 {
1308 add_to_pid_list (&stopped_pids, ret, *wstatp);
1309 goto retry;
1310 }
1311 else if (child == NULL)
1312 goto retry;
1313
1314 child->stopped = 1;
1315
1316 child->last_status = *wstatp;
1317
1318 if (WIFSTOPPED (*wstatp))
1319 {
1320 struct process_info *proc;
1321
1322 /* Architecture-specific setup after inferior is running. This
1323 needs to happen after we have attached to the inferior and it
1324 is stopped for the first time, but before we access any
1325 inferior registers. */
1326 proc = find_process_pid (pid_of (child));
1327 if (proc->private->new_inferior)
1328 {
1329 struct thread_info *saved_inferior;
1330
1331 saved_inferior = current_inferior;
1332 current_inferior = get_lwp_thread (child);
1333
1334 the_low_target.arch_setup ();
1335
1336 current_inferior = saved_inferior;
1337
1338 proc->private->new_inferior = 0;
1339 }
1340 }
1341
1342 /* Fetch the possibly triggered data watchpoint info and store it in
1343 CHILD.
1344
1345 On some archs, like x86, that use debug registers to set
1346 watchpoints, it's possible that the way to know which watched
1347 address trapped, is to check the register that is used to select
1348 which address to watch. Problem is, between setting the
1349 watchpoint and reading back which data address trapped, the user
1350 may change the set of watchpoints, and, as a consequence, GDB
1351 changes the debug registers in the inferior. To avoid reading
1352 back a stale stopped-data-address when that happens, we cache in
1353 LP the fact that a watchpoint trapped, and the corresponding data
1354 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1355 changes the debug registers meanwhile, we have the cached data we
1356 can rely on. */
1357
1358 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1359 {
1360 if (the_low_target.stopped_by_watchpoint == NULL)
1361 {
1362 child->stopped_by_watchpoint = 0;
1363 }
1364 else
1365 {
1366 struct thread_info *saved_inferior;
1367
1368 saved_inferior = current_inferior;
1369 current_inferior = get_lwp_thread (child);
1370
1371 child->stopped_by_watchpoint
1372 = the_low_target.stopped_by_watchpoint ();
1373
1374 if (child->stopped_by_watchpoint)
1375 {
1376 if (the_low_target.stopped_data_address != NULL)
1377 child->stopped_data_address
1378 = the_low_target.stopped_data_address ();
1379 else
1380 child->stopped_data_address = 0;
1381 }
1382
1383 current_inferior = saved_inferior;
1384 }
1385 }
1386
1387 /* Store the STOP_PC, with adjustment applied. This depends on the
1388 architecture being defined already (so that CHILD has a valid
1389 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1390 not). */
1391 if (WIFSTOPPED (*wstatp))
1392 child->stop_pc = get_stop_pc (child);
1393
1394 if (debug_threads
1395 && WIFSTOPPED (*wstatp)
1396 && the_low_target.get_pc != NULL)
1397 {
1398 struct thread_info *saved_inferior = current_inferior;
1399 struct regcache *regcache;
1400 CORE_ADDR pc;
1401
1402 current_inferior = get_lwp_thread (child);
1403 regcache = get_thread_regcache (current_inferior, 1);
1404 pc = (*the_low_target.get_pc) (regcache);
1405 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1406 current_inferior = saved_inferior;
1407 }
1408
1409 return child;
1410 }
1411
1412 /* This function should only be called if the LWP got a SIGTRAP.
1413
1414 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1415 event was handled, 0 otherwise. */
1416
1417 static int
1418 handle_tracepoints (struct lwp_info *lwp)
1419 {
1420 struct thread_info *tinfo = get_lwp_thread (lwp);
1421 int tpoint_related_event = 0;
1422
1423 /* If this tracepoint hit causes a tracing stop, we'll immediately
1424 uninsert tracepoints. To do this, we temporarily pause all
1425 threads, unpatch away, and then unpause threads. We need to make
1426 sure the unpausing doesn't resume LWP too. */
1427 lwp->suspended++;
1428
1429 /* And we need to be sure that any all-threads-stopping doesn't try
1430 to move threads out of the jump pads, as it could deadlock the
1431 inferior (LWP could be in the jump pad, maybe even holding the
1432 lock.) */
1433
1434 /* Do any necessary step collect actions. */
1435 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1436
1437 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1438
1439 /* See if we just hit a tracepoint and do its main collect
1440 actions. */
1441 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1442
1443 lwp->suspended--;
1444
1445 gdb_assert (lwp->suspended == 0);
1446 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1447
1448 if (tpoint_related_event)
1449 {
1450 if (debug_threads)
1451 fprintf (stderr, "got a tracepoint event\n");
1452 return 1;
1453 }
1454
1455 return 0;
1456 }
1457
1458 /* Convenience wrapper. Returns true if LWP is presently collecting a
1459 fast tracepoint. */
1460
1461 static int
1462 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1463 struct fast_tpoint_collect_status *status)
1464 {
1465 CORE_ADDR thread_area;
1466
1467 if (the_low_target.get_thread_area == NULL)
1468 return 0;
1469
1470 /* Get the thread area address. This is used to recognize which
1471 thread is which when tracing with the in-process agent library.
1472 We don't read anything from the address, and treat it as opaque;
1473 it's the address itself that we assume is unique per-thread. */
1474 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1475 return 0;
1476
1477 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1478 }
1479
1480 /* The reason we resume in the caller is that we want to be able
1481 to pass lwp->status_pending as WSTAT, and we need to clear
1482 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1483 refuses to resume. */
1484
1485 static int
1486 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1487 {
1488 struct thread_info *saved_inferior;
1489
1490 saved_inferior = current_inferior;
1491 current_inferior = get_lwp_thread (lwp);
1492
1493 if ((wstat == NULL
1494 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1495 && supports_fast_tracepoints ()
1496 && agent_loaded_p ())
1497 {
1498 struct fast_tpoint_collect_status status;
1499 int r;
1500
1501 if (debug_threads)
1502 fprintf (stderr, "\
1503 Checking whether LWP %ld needs to move out of the jump pad.\n",
1504 lwpid_of (lwp));
1505
1506 r = linux_fast_tracepoint_collecting (lwp, &status);
1507
1508 if (wstat == NULL
1509 || (WSTOPSIG (*wstat) != SIGILL
1510 && WSTOPSIG (*wstat) != SIGFPE
1511 && WSTOPSIG (*wstat) != SIGSEGV
1512 && WSTOPSIG (*wstat) != SIGBUS))
1513 {
1514 lwp->collecting_fast_tracepoint = r;
1515
1516 if (r != 0)
1517 {
1518 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1519 {
1520 /* Haven't executed the original instruction yet.
1521 Set breakpoint there, and wait till it's hit,
1522 then single-step until exiting the jump pad. */
1523 lwp->exit_jump_pad_bkpt
1524 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1525 }
1526
1527 if (debug_threads)
1528 fprintf (stderr, "\
1529 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1530 lwpid_of (lwp));
1531 current_inferior = saved_inferior;
1532
1533 return 1;
1534 }
1535 }
1536 else
1537 {
1538 /* If we get a synchronous signal while collecting, *and*
1539 while executing the (relocated) original instruction,
1540 reset the PC to point at the tpoint address, before
1541 reporting to GDB. Otherwise, it's an IPA lib bug: just
1542 report the signal to GDB, and pray for the best. */
1543
1544 lwp->collecting_fast_tracepoint = 0;
1545
1546 if (r != 0
1547 && (status.adjusted_insn_addr <= lwp->stop_pc
1548 && lwp->stop_pc < status.adjusted_insn_addr_end))
1549 {
1550 siginfo_t info;
1551 struct regcache *regcache;
1552
1553 /* The si_addr on a few signals references the address
1554 of the faulting instruction. Adjust that as
1555 well. */
1556 if ((WSTOPSIG (*wstat) == SIGILL
1557 || WSTOPSIG (*wstat) == SIGFPE
1558 || WSTOPSIG (*wstat) == SIGBUS
1559 || WSTOPSIG (*wstat) == SIGSEGV)
1560 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
1561 (PTRACE_TYPE_ARG3) 0, &info) == 0
1562 /* Final check just to make sure we don't clobber
1563 the siginfo of non-kernel-sent signals. */
1564 && (uintptr_t) info.si_addr == lwp->stop_pc)
1565 {
1566 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1567 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
1568 (PTRACE_TYPE_ARG3) 0, &info);
1569 }
1570
1571 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1572 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1573 lwp->stop_pc = status.tpoint_addr;
1574
1575 /* Cancel any fast tracepoint lock this thread was
1576 holding. */
1577 force_unlock_trace_buffer ();
1578 }
1579
1580 if (lwp->exit_jump_pad_bkpt != NULL)
1581 {
1582 if (debug_threads)
1583 fprintf (stderr,
1584 "Cancelling fast exit-jump-pad: removing bkpt. "
1585 "stopping all threads momentarily.\n");
1586
1587 stop_all_lwps (1, lwp);
1588 cancel_breakpoints ();
1589
1590 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1591 lwp->exit_jump_pad_bkpt = NULL;
1592
1593 unstop_all_lwps (1, lwp);
1594
1595 gdb_assert (lwp->suspended >= 0);
1596 }
1597 }
1598 }
1599
1600 if (debug_threads)
1601 fprintf (stderr, "\
1602 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1603 lwpid_of (lwp));
1604
1605 current_inferior = saved_inferior;
1606 return 0;
1607 }
1608
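/* A sketch of the caller contract described above the function (the
   caller here is invented): clear status_pending_p first, then do
   the resume in the caller.  */
#if 0 /* Illustration only.  */
static void
example_jump_pad_caller (struct lwp_info *lwp)
{
  int wstat = lwp->status_pending;

  if (maybe_move_out_of_jump_pad (lwp, &wstat))
    {
      lwp->status_pending_p = 0;	/* Else linux_resume_one_lwp refuses.  */
      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
}
#endif
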
1609 /* Enqueue one signal in the "signals to report later when out of the
1610 jump pad" list. */
1611
1612 static void
1613 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1614 {
1615 struct pending_signals *p_sig;
1616
1617 if (debug_threads)
1618 fprintf (stderr, "\
1619 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1620
1621 if (debug_threads)
1622 {
1623 struct pending_signals *sig;
1624
1625 for (sig = lwp->pending_signals_to_report;
1626 sig != NULL;
1627 sig = sig->prev)
1628 fprintf (stderr,
1629 " Already queued %d\n",
1630 sig->signal);
1631
1632 fprintf (stderr, " (no more currently queued signals)\n");
1633 }
1634
1635 /* Don't enqueue non-RT signals if they are already in the deferred
1636 queue. (SIGSTOP being the easiest signal to see ending up here
1637 twice) */
1638 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1639 {
1640 struct pending_signals *sig;
1641
1642 for (sig = lwp->pending_signals_to_report;
1643 sig != NULL;
1644 sig = sig->prev)
1645 {
1646 if (sig->signal == WSTOPSIG (*wstat))
1647 {
1648 if (debug_threads)
1649 fprintf (stderr,
1650 "Not requeuing already queued non-RT signal %d"
1651 " for LWP %ld\n",
1652 sig->signal,
1653 lwpid_of (lwp));
1654 return;
1655 }
1656 }
1657 }
1658
1659 p_sig = xmalloc (sizeof (*p_sig));
1660 p_sig->prev = lwp->pending_signals_to_report;
1661 p_sig->signal = WSTOPSIG (*wstat);
1662 memset (&p_sig->info, 0, sizeof (siginfo_t));
1663 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1664 &p_sig->info);
1665
1666 lwp->pending_signals_to_report = p_sig;
1667 }
1668
1669 /* Dequeue one signal from the "signals to report later when out of
1670 the jump pad" list. */
1671
1672 static int
1673 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1674 {
1675 if (lwp->pending_signals_to_report != NULL)
1676 {
1677 struct pending_signals **p_sig;
1678
1679 p_sig = &lwp->pending_signals_to_report;
1680 while ((*p_sig)->prev != NULL)
1681 p_sig = &(*p_sig)->prev;
1682
1683 *wstat = W_STOPCODE ((*p_sig)->signal);
1684 if ((*p_sig)->info.si_signo != 0)
1685 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1686 &(*p_sig)->info);
1687 free (*p_sig);
1688 *p_sig = NULL;
1689
1690 if (debug_threads)
1691 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1692 WSTOPSIG (*wstat), lwpid_of (lwp));
1693
1694 if (debug_threads)
1695 {
1696 struct pending_signals *sig;
1697
1698 for (sig = lwp->pending_signals_to_report;
1699 sig != NULL;
1700 sig = sig->prev)
1701 fprintf (stderr,
1702 " Still queued %d\n",
1703 sig->signal);
1704
1705 fprintf (stderr, " (no more queued signals)\n");
1706 }
1707
1708 return 1;
1709 }
1710
1711 return 0;
1712 }
1713
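/* A round-trip sketch for the two queue helpers above; the signal is
   made up.  Defer while the LWP is in the jump pad, replay once it
   is out.  */
#if 0 /* Illustration only.  */
static void
example_defer_and_replay (struct lwp_info *lwp)
{
  int wstat = W_STOPCODE (SIGUSR1);

  enqueue_one_deferred_signal (lwp, &wstat);	/* Park it.  */
  /* ... single-step the LWP out of the jump pad ...  */
  if (dequeue_one_deferred_signal (lwp, &wstat))
    gdb_assert (WSTOPSIG (wstat) == SIGUSR1);	/* Replay it.  */
}
#endif
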
1714 /* Arrange for a breakpoint to be hit again later. We don't keep the
1715 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1716 will handle the current event, eventually we will resume this LWP,
1717 and this breakpoint will trap again. */
1718
1719 static int
1720 cancel_breakpoint (struct lwp_info *lwp)
1721 {
1722 struct thread_info *saved_inferior;
1723
1724 /* There's nothing to do if we don't support breakpoints. */
1725 if (!supports_breakpoints ())
1726 return 0;
1727
1728 /* breakpoint_at reads from current inferior. */
1729 saved_inferior = current_inferior;
1730 current_inferior = get_lwp_thread (lwp);
1731
1732 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1733 {
1734 if (debug_threads)
1735 fprintf (stderr,
1736 "CB: Push back breakpoint for %s\n",
1737 target_pid_to_str (ptid_of (lwp)));
1738
1739 /* Back up the PC if necessary. */
1740 if (the_low_target.decr_pc_after_break)
1741 {
1742 struct regcache *regcache
1743 = get_thread_regcache (current_inferior, 1);
1744 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1745 }
1746
1747 current_inferior = saved_inferior;
1748 return 1;
1749 }
1750 else
1751 {
1752 if (debug_threads)
1753 fprintf (stderr,
1754 "CB: No breakpoint found at %s for [%s]\n",
1755 paddress (lwp->stop_pc),
1756 target_pid_to_str (ptid_of (lwp)));
1757 }
1758
1759 current_inferior = saved_inferior;
1760 return 0;
1761 }
1762
1763 /* When the event-loop is doing a step-over, this points at the thread
1764 being stepped. */
1765 ptid_t step_over_bkpt;
1766
1767 /* Wait for an event from child PID. If PID is -1, wait for any
1768 child. Store the stop status through the status pointer WSTAT.
1769 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1770 event was found and OPTIONS contains WNOHANG. Return the PID of
1771 the stopped child otherwise. */
1772
1773 static int
1774 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1775 {
1776 struct lwp_info *event_child, *requested_child;
1777 ptid_t wait_ptid;
1778
1779 event_child = NULL;
1780 requested_child = NULL;
1781
1782 /* Check for a lwp with a pending status. */
1783
1784 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1785 {
1786 event_child = (struct lwp_info *)
1787 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1788 if (debug_threads && event_child)
1789 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1790 }
1791 else
1792 {
1793 requested_child = find_lwp_pid (ptid);
1794
1795 if (stopping_threads == NOT_STOPPING_THREADS
1796 && requested_child->status_pending_p
1797 && requested_child->collecting_fast_tracepoint)
1798 {
1799 enqueue_one_deferred_signal (requested_child,
1800 &requested_child->status_pending);
1801 requested_child->status_pending_p = 0;
1802 requested_child->status_pending = 0;
1803 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1804 }
1805
1806 if (requested_child->suspended
1807 && requested_child->status_pending_p)
1808 fatal ("requesting an event out of a suspended child?");
1809
1810 if (requested_child->status_pending_p)
1811 event_child = requested_child;
1812 }
1813
1814 if (event_child != NULL)
1815 {
1816 if (debug_threads)
1817 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1818 lwpid_of (event_child), event_child->status_pending);
1819 *wstat = event_child->status_pending;
1820 event_child->status_pending_p = 0;
1821 event_child->status_pending = 0;
1822 current_inferior = get_lwp_thread (event_child);
1823 return lwpid_of (event_child);
1824 }
1825
1826 if (ptid_is_pid (ptid))
1827 {
1828 /* A request to wait for a specific tgid. This is not possible
1829 with waitpid, so instead, we wait for any child, and leave
1830 children we're not interested in right now with a pending
1831 status to report later. */
1832 wait_ptid = minus_one_ptid;
1833 }
1834 else
1835 wait_ptid = ptid;
1836
1837 /* We only enter this loop if no process has a pending wait status. Thus
1838 any action taken in response to a wait status inside this loop is
1839 responding as soon as we detect the status, not after any pending
1840 events. */
1841 while (1)
1842 {
1843 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1844
1845 if ((options & WNOHANG) && event_child == NULL)
1846 {
1847 if (debug_threads)
1848 fprintf (stderr, "WNOHANG set, no event found\n");
1849 return 0;
1850 }
1851
1852 if (event_child == NULL)
1853 error ("event from unknown child");
1854
1855 if (ptid_is_pid (ptid)
1856 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1857 {
1858 if (! WIFSTOPPED (*wstat))
1859 mark_lwp_dead (event_child, *wstat);
1860 else
1861 {
1862 event_child->status_pending_p = 1;
1863 event_child->status_pending = *wstat;
1864 }
1865 continue;
1866 }
1867
1868 current_inferior = get_lwp_thread (event_child);
1869
1870 /* Check for thread exit. */
1871 if (! WIFSTOPPED (*wstat))
1872 {
1873 if (debug_threads)
1874 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1875
1876 /* If the last thread is exiting, just return. */
1877 if (last_thread_of_process_p (current_inferior))
1878 {
1879 if (debug_threads)
1880 fprintf (stderr, "LWP %ld is last lwp of process\n",
1881 lwpid_of (event_child));
1882 return lwpid_of (event_child);
1883 }
1884
1885 if (!non_stop)
1886 {
1887 current_inferior = (struct thread_info *) all_threads.head;
1888 if (debug_threads)
1889 fprintf (stderr, "Current inferior is now %ld\n",
1890 lwpid_of (get_thread_lwp (current_inferior)));
1891 }
1892 else
1893 {
1894 current_inferior = NULL;
1895 if (debug_threads)
1896 fprintf (stderr, "Current inferior is now <NULL>\n");
1897 }
1898
1899 /* If we were waiting for this particular child to do something...
1900 well, it did something. */
1901 if (requested_child != NULL)
1902 {
1903 int lwpid = lwpid_of (event_child);
1904
1905 /* Cancel the step-over operation --- the thread that
1906 started it is gone. */
1907 if (finish_step_over (event_child))
1908 unstop_all_lwps (1, event_child);
1909 delete_lwp (event_child);
1910 return lwpid;
1911 }
1912
1913 delete_lwp (event_child);
1914
1915 /* Wait for a more interesting event. */
1916 continue;
1917 }
1918
1919 if (event_child->must_set_ptrace_flags)
1920 {
1921 linux_enable_event_reporting (lwpid_of (event_child));
1922 event_child->must_set_ptrace_flags = 0;
1923 }
1924
1925 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1926 && *wstat >> 16 != 0)
1927 {
1928 handle_extended_wait (event_child, *wstat);
1929 continue;
1930 }
1931
1932 if (WIFSTOPPED (*wstat)
1933 && WSTOPSIG (*wstat) == SIGSTOP
1934 && event_child->stop_expected)
1935 {
1936 int should_stop;
1937
1938 if (debug_threads)
1939 fprintf (stderr, "Expected stop.\n");
1940 event_child->stop_expected = 0;
1941
1942 should_stop = (current_inferior->last_resume_kind == resume_stop
1943 || stopping_threads != NOT_STOPPING_THREADS);
1944
1945 if (!should_stop)
1946 {
1947 linux_resume_one_lwp (event_child,
1948 event_child->stepping, 0, NULL);
1949 continue;
1950 }
1951 }
1952
1953 return lwpid_of (event_child);
1954 }
1955
1956 /* NOTREACHED */
1957 return 0;
1958 }
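/* Editorial sketch (not part of the original file): how the loop above
   defers events from uninteresting children when waiting on a specific
   tgid.  The LWP ids here are illustrative assumptions:

     int wstat;
     ptid_t pid100 = pid_to_ptid (100);

     // GDB waits for process 100, but LWP 200.201 reports a stop first.
     // The loop parks that status on the lwp:
     //   event_child->status_pending_p = 1;
     //   event_child->status_pending = *wstat;
     // and keeps waiting.  A later linux_wait_for_event that matches
     // tgid 200 returns immediately from the status_pending check at
     // the top of this function, without calling waitpid again.
     linux_wait_for_event (pid100, &wstat, 0);
*/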
1959
1960 /* Count the LWPs that have had events. */
1961
1962 static int
1963 count_events_callback (struct inferior_list_entry *entry, void *data)
1964 {
1965 struct lwp_info *lp = (struct lwp_info *) entry;
1966 struct thread_info *thread = get_lwp_thread (lp);
1967 int *count = data;
1968
1969 gdb_assert (count != NULL);
1970
1971 /* Count only resumed LWPs that have a SIGTRAP event pending that
1972 should be reported to GDB. */
1973 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1974 && thread->last_resume_kind != resume_stop
1975 && lp->status_pending_p
1976 && WIFSTOPPED (lp->status_pending)
1977 && WSTOPSIG (lp->status_pending) == SIGTRAP
1978 && !breakpoint_inserted_here (lp->stop_pc))
1979 (*count)++;
1980
1981 return 0;
1982 }
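/* Editorial note, inferred from the usage in this file: find_inferior
   visits each entry until the callback returns nonzero, and returns
   that entry (or NULL).  Since count_events_callback always returns 0,
   it is used purely for its side effect on *count:

     int num_events = 0;
     find_inferior (&all_lwps, count_events_callback, &num_events);
     // num_events now holds the number of reportable SIGTRAP stops.
*/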
1983
1984 /* Select the LWP (if any) that is currently being single-stepped. */
1985
1986 static int
1987 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1988 {
1989 struct lwp_info *lp = (struct lwp_info *) entry;
1990 struct thread_info *thread = get_lwp_thread (lp);
1991
1992 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1993 && thread->last_resume_kind == resume_step
1994 && lp->status_pending_p)
1995 return 1;
1996 else
1997 return 0;
1998 }
1999
2000 /* Select the Nth LWP that has had a SIGTRAP event that should be
2001 reported to GDB. */
2002
2003 static int
2004 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2005 {
2006 struct lwp_info *lp = (struct lwp_info *) entry;
2007 struct thread_info *thread = get_lwp_thread (lp);
2008 int *selector = data;
2009
2010 gdb_assert (selector != NULL);
2011
2012 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2013 if (thread->last_resume_kind != resume_stop
2014 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2015 && lp->status_pending_p
2016 && WIFSTOPPED (lp->status_pending)
2017 && WSTOPSIG (lp->status_pending) == SIGTRAP
2018 && !breakpoint_inserted_here (lp->stop_pc))
2019 if ((*selector)-- == 0)
2020 return 1;
2021
2022 return 0;
2023 }
2024
2025 static int
2026 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2027 {
2028 struct lwp_info *lp = (struct lwp_info *) entry;
2029 struct thread_info *thread = get_lwp_thread (lp);
2030 struct lwp_info *event_lp = data;
2031
2032 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2033 if (lp == event_lp)
2034 return 0;
2035
2036 /* If a LWP other than the LWP that we're reporting an event for has
2037 hit a GDB breakpoint (as opposed to some random trap signal),
2038 then just arrange for it to hit it again later. We don't keep
2039 the SIGTRAP status and don't forward the SIGTRAP signal to the
2040 LWP. We will handle the current event; eventually we will resume
2041 all LWPs, and this one will get its breakpoint trap again.
2042
2043 If we do not do this, then we run the risk that the user will
2044 delete or disable the breakpoint, but the LWP will have already
2045 tripped on it. */
2046
2047 if (thread->last_resume_kind != resume_stop
2048 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2049 && lp->status_pending_p
2050 && WIFSTOPPED (lp->status_pending)
2051 && WSTOPSIG (lp->status_pending) == SIGTRAP
2052 && !lp->stepping
2053 && !lp->stopped_by_watchpoint
2054 && cancel_breakpoint (lp))
2055 /* Throw away the SIGTRAP. */
2056 lp->status_pending_p = 0;
2057
2058 return 0;
2059 }
2060
2061 static void
2062 linux_cancel_breakpoints (void)
2063 {
2064 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2065 }
2066
2067 /* Select one LWP out of those that have events pending. */
2068
2069 static void
2070 select_event_lwp (struct lwp_info **orig_lp)
2071 {
2072 int num_events = 0;
2073 int random_selector;
2074 struct lwp_info *event_lp;
2075
2076 /* Give preference to any LWP that is being single-stepped. */
2077 event_lp
2078 = (struct lwp_info *) find_inferior (&all_lwps,
2079 select_singlestep_lwp_callback, NULL);
2080 if (event_lp != NULL)
2081 {
2082 if (debug_threads)
2083 fprintf (stderr,
2084 "SEL: Select single-step %s\n",
2085 target_pid_to_str (ptid_of (event_lp)));
2086 }
2087 else
2088 {
2089 /* No single-stepping LWP. Select one at random, out of those
2090 which have had SIGTRAP events. */
2091
2092 /* First see how many SIGTRAP events we have. */
2093 find_inferior (&all_lwps, count_events_callback, &num_events);
2094
2095 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2096 random_selector = (int)
2097 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2098
2099 if (debug_threads && num_events > 1)
2100 fprintf (stderr,
2101 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2102 num_events, random_selector);
2103
2104 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2105 select_event_lwp_callback,
2106 &random_selector);
2107 }
2108
2109 if (event_lp != NULL)
2110 {
2111 /* Switch the event LWP. */
2112 *orig_lp = event_lp;
2113 }
2114 }
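/* Editorial sketch: the random_selector computation above is the
   classic idiom for drawing a uniform index in [0, num_events).
   A standalone equivalent, assuming only <stdlib.h>:

     static int
     pick_uniform_index (int num_events)
     {
       // rand () is uniform on [0, RAND_MAX]; dividing by
       // RAND_MAX + 1.0 gives a value in [0, 1), so the product
       // truncates to an integer in 0 .. num_events - 1, each index
       // equally likely up to rounding.
       return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
     }

   select_event_lwp_callback then counts qualifying LWPs down from this
   index and returns the Nth match.  */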
2115
2116 /* Decrement the suspend count of an LWP. */
2117
2118 static int
2119 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2120 {
2121 struct lwp_info *lwp = (struct lwp_info *) entry;
2122
2123 /* Ignore EXCEPT. */
2124 if (lwp == except)
2125 return 0;
2126
2127 lwp->suspended--;
2128
2129 gdb_assert (lwp->suspended >= 0);
2130 return 0;
2131 }
2132
2133 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2134 non-NULL. */
2135
2136 static void
2137 unsuspend_all_lwps (struct lwp_info *except)
2138 {
2139 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2140 }
2141
2142 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2143 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2144 void *data);
2145 static int lwp_running (struct inferior_list_entry *entry, void *data);
2146 static ptid_t linux_wait_1 (ptid_t ptid,
2147 struct target_waitstatus *ourstatus,
2148 int target_options);
2149
2150 /* Stabilize threads (move out of jump pads).
2151
2152 If a thread is midway through collecting a fast tracepoint, we need to
2153 finish the collection and move it out of the jump pad before
2154 reporting the signal.
2155
2156 This avoids recursion while collecting (when a signal arrives
2157 midway, and the signal handler itself collects), which would trash
2158 the trace buffer. In case the user set a breakpoint in a signal
2159 handler, this avoids the backtrace showing the jump pad, etc.
2160 Most importantly, there are certain things we can't do safely if
2161 threads are stopped in a jump pad (or in its callees). For
2162 example:
2163
2164 - starting a new trace run. A thread still collecting for the
2165 previous run could trash the trace buffer when resumed. The trace
2166 buffer control structures would have been reset but the thread had
2167 no way to tell. The thread could even be midway through memcpy'ing
2168 to the buffer, which would mean that when resumed, it would clobber
2169 the trace buffer that had been set up for the new run.
2170
2171 - we can't safely rewrite/reuse the jump pads for new
2172 tracepoints. Say you do tstart while a thread is stopped midway
2173 through collecting. When the thread is later resumed, it finishes
2174 the collection, and returns to the jump pad, to execute the
2175 original instruction that was under the tracepoint jump at the time
2176 the older run had been started. If the jump pad had since been
2177 rewritten for something else in the new run, the thread would now
2178 execute wrong or random instructions. */
2179
2180 static void
2181 linux_stabilize_threads (void)
2182 {
2183 struct thread_info *save_inferior;
2184 struct lwp_info *lwp_stuck;
2185
2186 lwp_stuck
2187 = (struct lwp_info *) find_inferior (&all_lwps,
2188 stuck_in_jump_pad_callback, NULL);
2189 if (lwp_stuck != NULL)
2190 {
2191 if (debug_threads)
2192 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2193 lwpid_of (lwp_stuck));
2194 return;
2195 }
2196
2197 save_inferior = current_inferior;
2198
2199 stabilizing_threads = 1;
2200
2201 /* Kick 'em all. */
2202 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2203
2204 /* Loop until all are stopped out of the jump pads. */
2205 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2206 {
2207 struct target_waitstatus ourstatus;
2208 struct lwp_info *lwp;
2209 int wstat;
2210
2211 /* Note that we go through the full wait event loop. While
2212 moving threads out of the jump pad, we need to be able to step
2213 over internal breakpoints and such. */
2214 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2215
2216 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2217 {
2218 lwp = get_thread_lwp (current_inferior);
2219
2220 /* Lock it. */
2221 lwp->suspended++;
2222
2223 if (ourstatus.value.sig != GDB_SIGNAL_0
2224 || current_inferior->last_resume_kind == resume_stop)
2225 {
2226 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2227 enqueue_one_deferred_signal (lwp, &wstat);
2228 }
2229 }
2230 }
2231
2232 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2233
2234 stabilizing_threads = 0;
2235
2236 current_inferior = save_inferior;
2237
2238 if (debug_threads)
2239 {
2240 lwp_stuck
2241 = (struct lwp_info *) find_inferior (&all_lwps,
2242 stuck_in_jump_pad_callback, NULL);
2243 if (lwp_stuck != NULL)
2244 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2245 lwpid_of (lwp_stuck));
2246 }
2247 }
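/* Editorial sketch: the W_STOPCODE call above manufactures a raw wait
   status that the WIF* macros recognize, so the deferred-signal queue
   can store plain ints.  On Linux the encoding is ((sig) << 8) | 0x7f,
   hence:

     int wstat = W_STOPCODE (SIGUSR1);
     assert (WIFSTOPPED (wstat));
     assert (WSTOPSIG (wstat) == SIGUSR1);
*/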
2248
2249 /* Wait for an event from the inferior; fill in OURSTATUS and return the event LWP's ptid. */
2250
2251 static ptid_t
2252 linux_wait_1 (ptid_t ptid,
2253 struct target_waitstatus *ourstatus, int target_options)
2254 {
2255 int w;
2256 struct lwp_info *event_child;
2257 int options;
2258 int pid;
2259 int step_over_finished;
2260 int bp_explains_trap;
2261 int maybe_internal_trap;
2262 int report_to_gdb;
2263 int trace_event;
2264 int in_step_range;
2265
2266 /* Translate generic target options into linux options. */
2267 options = __WALL;
2268 if (target_options & TARGET_WNOHANG)
2269 options |= WNOHANG;
2270
2271 retry:
2272 bp_explains_trap = 0;
2273 trace_event = 0;
2274 in_step_range = 0;
2275 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2276
2277 /* If we were only supposed to resume one thread, only wait for
2278 that thread - if it's still alive. If it died, however - which
2279 can happen if we're coming from the thread death case below -
2280 then we need to make sure we restart the other threads. We could
2281 pick a thread at random or restart all; restarting all is less
2282 arbitrary. */
2283 if (!non_stop
2284 && !ptid_equal (cont_thread, null_ptid)
2285 && !ptid_equal (cont_thread, minus_one_ptid))
2286 {
2287 struct thread_info *thread;
2288
2289 thread = (struct thread_info *) find_inferior_id (&all_threads,
2290 cont_thread);
2291
2292 /* No stepping, no signal - unless one is pending already, of course. */
2293 if (thread == NULL)
2294 {
2295 struct thread_resume resume_info;
2296 resume_info.thread = minus_one_ptid;
2297 resume_info.kind = resume_continue;
2298 resume_info.sig = 0;
2299 linux_resume (&resume_info, 1);
2300 }
2301 else
2302 ptid = cont_thread;
2303 }
2304
2305 if (ptid_equal (step_over_bkpt, null_ptid))
2306 pid = linux_wait_for_event (ptid, &w, options);
2307 else
2308 {
2309 if (debug_threads)
2310 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2311 target_pid_to_str (step_over_bkpt));
2312 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2313 }
2314
2315 if (pid == 0) /* only if TARGET_WNOHANG */
2316 return null_ptid;
2317
2318 event_child = get_thread_lwp (current_inferior);
2319
2320 /* If we are waiting for a particular child, and it exited,
2321 linux_wait_for_event will return its exit status. Similarly if
2322 the last child exited. If this is not the last child, however,
2323 do not report it as exited until there is a 'thread exited' response
2324 available in the remote protocol. Instead, just wait for another event.
2325 This should be safe, because if the thread crashed we will already
2326 have reported the termination signal to GDB; that should stop any
2327 in-progress stepping operations, etc.
2328
2329 Report the exit status of the last thread to exit. This matches
2330 LinuxThreads' behavior. */
2331
2332 if (last_thread_of_process_p (current_inferior))
2333 {
2334 if (WIFEXITED (w) || WIFSIGNALED (w))
2335 {
2336 if (WIFEXITED (w))
2337 {
2338 ourstatus->kind = TARGET_WAITKIND_EXITED;
2339 ourstatus->value.integer = WEXITSTATUS (w);
2340
2341 if (debug_threads)
2342 fprintf (stderr,
2343 "\nChild exited with retcode = %x \n",
2344 WEXITSTATUS (w));
2345 }
2346 else
2347 {
2348 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2349 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2350
2351 if (debug_threads)
2352 fprintf (stderr,
2353 "\nChild terminated with signal = %x \n",
2354 WTERMSIG (w));
2355
2356 }
2357
2358 return ptid_of (event_child);
2359 }
2360 }
2361 else
2362 {
2363 if (!WIFSTOPPED (w))
2364 goto retry;
2365 }
2366
2367 /* If this event was not handled before, and is not a SIGTRAP, we
2368 report it. SIGILL and SIGSEGV are also treated as traps in case
2369 a breakpoint is inserted at the current PC. If this target does
2370 not support internal breakpoints at all, we also report the
2371 SIGTRAP without further processing; it's of no concern to us. */
2372 maybe_internal_trap
2373 = (supports_breakpoints ()
2374 && (WSTOPSIG (w) == SIGTRAP
2375 || ((WSTOPSIG (w) == SIGILL
2376 || WSTOPSIG (w) == SIGSEGV)
2377 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2378
2379 if (maybe_internal_trap)
2380 {
2381 /* Handle anything that requires bookkeeping before deciding to
2382 report the event or continue waiting. */
2383
2384 /* First check if we can explain the SIGTRAP with an internal
2385 breakpoint, or if we should possibly report the event to GDB.
2386 Do this before anything that may remove or insert a
2387 breakpoint. */
2388 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2389
2390 /* We have a SIGTRAP, possibly a step-over dance has just
2391 finished. If so, tweak the state machine accordingly,
2392 reinsert breakpoints and delete any reinsert (software
2393 single-step) breakpoints. */
2394 step_over_finished = finish_step_over (event_child);
2395
2396 /* Now invoke the callbacks of any internal breakpoints there. */
2397 check_breakpoints (event_child->stop_pc);
2398
2399 /* Handle tracepoint data collecting. This may overflow the
2400 trace buffer, and cause a tracing stop, removing
2401 breakpoints. */
2402 trace_event = handle_tracepoints (event_child);
2403
2404 if (bp_explains_trap)
2405 {
2406 /* If we stepped or ran into an internal breakpoint, we've
2407 already handled it. So next time we resume (from this
2408 PC), we should step over it. */
2409 if (debug_threads)
2410 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2411
2412 if (breakpoint_here (event_child->stop_pc))
2413 event_child->need_step_over = 1;
2414 }
2415 }
2416 else
2417 {
2418 /* We have some other signal. Possibly a step-over dance was in
2419 progress, and it should be cancelled too. */
2420 step_over_finished = finish_step_over (event_child);
2421 }
2422
2423 /* We have all the data we need. Either report the event to GDB, or
2424 resume threads and keep waiting for more. */
2425
2426 /* If we're collecting a fast tracepoint, finish the collection and
2427 move out of the jump pad before delivering a signal. See
2428 linux_stabilize_threads. */
2429
2430 if (WIFSTOPPED (w)
2431 && WSTOPSIG (w) != SIGTRAP
2432 && supports_fast_tracepoints ()
2433 && agent_loaded_p ())
2434 {
2435 if (debug_threads)
2436 fprintf (stderr,
2437 "Got signal %d for LWP %ld. Check if we need "
2438 "to defer or adjust it.\n",
2439 WSTOPSIG (w), lwpid_of (event_child));
2440
2441 /* Allow debugging the jump pad itself. */
2442 if (current_inferior->last_resume_kind != resume_step
2443 && maybe_move_out_of_jump_pad (event_child, &w))
2444 {
2445 enqueue_one_deferred_signal (event_child, &w);
2446
2447 if (debug_threads)
2448 fprintf (stderr,
2449 "Signal %d for LWP %ld deferred (in jump pad)\n",
2450 WSTOPSIG (w), lwpid_of (event_child));
2451
2452 linux_resume_one_lwp (event_child, 0, 0, NULL);
2453 goto retry;
2454 }
2455 }
2456
2457 if (event_child->collecting_fast_tracepoint)
2458 {
2459 if (debug_threads)
2460 fprintf (stderr, "\
2461 LWP %ld was trying to move out of the jump pad (%d). \
2462 Check if we're already there.\n",
2463 lwpid_of (event_child),
2464 event_child->collecting_fast_tracepoint);
2465
2466 trace_event = 1;
2467
2468 event_child->collecting_fast_tracepoint
2469 = linux_fast_tracepoint_collecting (event_child, NULL);
2470
2471 if (event_child->collecting_fast_tracepoint != 1)
2472 {
2473 /* No longer need this breakpoint. */
2474 if (event_child->exit_jump_pad_bkpt != NULL)
2475 {
2476 if (debug_threads)
2477 fprintf (stderr,
2478 "No longer need exit-jump-pad bkpt; removing it."
2479 "stopping all threads momentarily.\n");
2480
2481 /* Other running threads could hit this breakpoint.
2482 We don't handle moribund locations like GDB does;
2483 instead we always pause all threads when removing
2484 breakpoints, so that any step-over or
2485 decr_pc_after_break adjustment is always taken
2486 care of while the breakpoint is still
2487 inserted. */
2488 stop_all_lwps (1, event_child);
2489 cancel_breakpoints ();
2490
2491 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2492 event_child->exit_jump_pad_bkpt = NULL;
2493
2494 unstop_all_lwps (1, event_child);
2495
2496 gdb_assert (event_child->suspended >= 0);
2497 }
2498 }
2499
2500 if (event_child->collecting_fast_tracepoint == 0)
2501 {
2502 if (debug_threads)
2503 fprintf (stderr,
2504 "fast tracepoint finished "
2505 "collecting successfully.\n");
2506
2507 /* We may have a deferred signal to report. */
2508 if (dequeue_one_deferred_signal (event_child, &w))
2509 {
2510 if (debug_threads)
2511 fprintf (stderr, "dequeued one signal.\n");
2512 }
2513 else
2514 {
2515 if (debug_threads)
2516 fprintf (stderr, "no deferred signals.\n");
2517
2518 if (stabilizing_threads)
2519 {
2520 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2521 ourstatus->value.sig = GDB_SIGNAL_0;
2522 return ptid_of (event_child);
2523 }
2524 }
2525 }
2526 }
2527
2528 /* Check whether GDB would be interested in this event. */
2529
2530 /* If GDB is not interested in this signal, don't stop other
2531 threads, and don't report it to GDB. Just resume the inferior
2532 right away. We do this for threading-related signals as well as
2533 any that GDB specifically requested we ignore. But never ignore
2534 SIGSTOP if we sent it ourselves, and do not ignore signals when
2535 stepping - they may require special handling to skip the signal
2536 handler. */
2537 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2538 thread library? */
2539 if (WIFSTOPPED (w)
2540 && current_inferior->last_resume_kind != resume_step
2541 && (
2542 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2543 (current_process ()->private->thread_db != NULL
2544 && (WSTOPSIG (w) == __SIGRTMIN
2545 || WSTOPSIG (w) == __SIGRTMIN + 1))
2546 ||
2547 #endif
2548 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2549 && !(WSTOPSIG (w) == SIGSTOP
2550 && current_inferior->last_resume_kind == resume_stop))))
2551 {
2552 siginfo_t info, *info_p;
2553
2554 if (debug_threads)
2555 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2556 WSTOPSIG (w), lwpid_of (event_child));
2557
2558 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2559 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2560 info_p = &info;
2561 else
2562 info_p = NULL;
2563 linux_resume_one_lwp (event_child, event_child->stepping,
2564 WSTOPSIG (w), info_p);
2565 goto retry;
2566 }
2567
2568 /* Note that all addresses are always "out of the step range" when
2569 there's no range to begin with. */
2570 in_step_range = lwp_in_step_range (event_child);
2571
2572 /* If GDB wanted this thread to single step, and the thread is out
2573 of the step range, we always want to report the SIGTRAP, and let
2574 GDB handle it. Watchpoints should always be reported. So should
2575 signals we can't explain. A SIGTRAP we can't explain could be a
2576 GDB breakpoint --- we may or may not support Z0 breakpoints. If
2577 we do, we'd be able to handle GDB breakpoints on top of internal
2578 breakpoints, by handling the internal breakpoint and still
2579 reporting the event to GDB. If we don't, we're out of luck; GDB
2580 won't see the breakpoint hit. */
2581 report_to_gdb = (!maybe_internal_trap
2582 || (current_inferior->last_resume_kind == resume_step
2583 && !in_step_range)
2584 || event_child->stopped_by_watchpoint
2585 || (!step_over_finished && !in_step_range
2586 && !bp_explains_trap && !trace_event)
2587 || (gdb_breakpoint_here (event_child->stop_pc)
2588 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2589 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2590
2591 run_breakpoint_commands (event_child->stop_pc);
2592
2593 /* We found no reason GDB would want us to stop. We either hit one
2594 of our own breakpoints, or finished an internal step GDB
2595 shouldn't know about. */
2596 if (!report_to_gdb)
2597 {
2598 if (debug_threads)
2599 {
2600 if (bp_explains_trap)
2601 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2602 if (step_over_finished)
2603 fprintf (stderr, "Step-over finished.\n");
2604 if (trace_event)
2605 fprintf (stderr, "Tracepoint event.\n");
2606 if (lwp_in_step_range (event_child))
2607 fprintf (stderr, "Range stepping pc 0x%s [0x%s, 0x%s).\n",
2608 paddress (event_child->stop_pc),
2609 paddress (event_child->step_range_start),
2610 paddress (event_child->step_range_end));
2611 }
2612
2613 /* We're not reporting this breakpoint to GDB, so apply the
2614 decr_pc_after_break adjustment to the inferior's regcache
2615 ourselves. */
2616
2617 if (the_low_target.set_pc != NULL)
2618 {
2619 struct regcache *regcache
2620 = get_thread_regcache (get_lwp_thread (event_child), 1);
2621 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2622 }
2623
2624 /* We may have finished stepping over a breakpoint. If so,
2625 we've stopped and suspended all LWPs momentarily except the
2626 stepping one. This is where we resume them all again. We're
2627 going to keep waiting, so use proceed, which handles stepping
2628 over the next breakpoint. */
2629 if (debug_threads)
2630 fprintf (stderr, "proceeding all threads.\n");
2631
2632 if (step_over_finished)
2633 unsuspend_all_lwps (event_child);
2634
2635 proceed_all_lwps ();
2636 goto retry;
2637 }
2638
2639 if (debug_threads)
2640 {
2641 if (current_inferior->last_resume_kind == resume_step)
2642 {
2643 if (event_child->step_range_start == event_child->step_range_end)
2644 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2645 else if (!lwp_in_step_range (event_child))
2646 fprintf (stderr, "Out of step range, reporting event.\n");
2647 }
2648 if (event_child->stopped_by_watchpoint)
2649 fprintf (stderr, "Stopped by watchpoint.\n");
2650 if (gdb_breakpoint_here (event_child->stop_pc))
2651 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2652 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2654 }
2655
2656 /* Alright, we're going to report a stop. */
2657
2658 if (!non_stop && !stabilizing_threads)
2659 {
2660 /* In all-stop, stop all threads. */
2661 stop_all_lwps (0, NULL);
2662
2663 /* If we're not waiting for a specific LWP, choose an event LWP
2664 from among those that have had events. Giving equal priority
2665 to all LWPs that have had events helps prevent
2666 starvation. */
2667 if (ptid_equal (ptid, minus_one_ptid))
2668 {
2669 event_child->status_pending_p = 1;
2670 event_child->status_pending = w;
2671
2672 select_event_lwp (&event_child);
2673
2674 event_child->status_pending_p = 0;
2675 w = event_child->status_pending;
2676 }
2677
2678 /* Now that we've selected our final event LWP, cancel any
2679 breakpoints in other LWPs that have hit a GDB breakpoint.
2680 See the comment in cancel_breakpoints_callback to find out
2681 why. */
2682 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2683
2684 /* If we were doing a step-over, all other threads but the stepping one
2685 had been paused in start_step_over, with their suspend counts
2686 incremented. We don't want to do a full unstop/unpause, because we're
2687 in all-stop mode (so we want threads stopped), but we still need to
2688 unsuspend the other threads, to decrement their `suspended' count
2689 back. */
2690 if (step_over_finished)
2691 unsuspend_all_lwps (event_child);
2692
2693 /* Stabilize threads (move out of jump pads). */
2694 stabilize_threads ();
2695 }
2696 else
2697 {
2698 /* If we just finished a step-over, then all threads had been
2699 momentarily paused. In all-stop, that's fine, we want
2700 threads stopped by now anyway. In non-stop, we need to
2701 re-resume threads that GDB wanted to be running. */
2702 if (step_over_finished)
2703 unstop_all_lwps (1, event_child);
2704 }
2705
2706 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2707
2708 if (current_inferior->last_resume_kind == resume_stop
2709 && WSTOPSIG (w) == SIGSTOP)
2710 {
2711 /* A thread that GDB requested to stop with vCont;t stopped
2712 cleanly, so report it as GDB_SIGNAL_0. The use of
2713 SIGSTOP is an implementation detail. */
2714 ourstatus->value.sig = GDB_SIGNAL_0;
2715 }
2716 else if (current_inferior->last_resume_kind == resume_stop
2717 && WSTOPSIG (w) != SIGSTOP)
2718 {
2719 /* A thread that GDB requested to stop with vCont;t, but which
2720 stopped for some other reason. */
2721 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2722 }
2723 else
2724 {
2725 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2726 }
2727
2728 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2729
2730 if (debug_threads)
2731 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2732 target_pid_to_str (ptid_of (event_child)),
2733 ourstatus->kind,
2734 ourstatus->value.sig);
2735
2736 return ptid_of (event_child);
2737 }
2738
2739 /* Get rid of any pending event in the pipe. */
2740 static void
2741 async_file_flush (void)
2742 {
2743 int ret;
2744 char buf;
2745
2746 do
2747 ret = read (linux_event_pipe[0], &buf, 1);
2748 while (ret >= 0 || (ret == -1 && errno == EINTR));
2749 }
2750
2751 /* Put something in the pipe, so the event loop wakes up. */
2752 static void
2753 async_file_mark (void)
2754 {
2755 int ret;
2756
2757 async_file_flush ();
2758
2759 do
2760 ret = write (linux_event_pipe[1], "+", 1);
2761 while (ret == 0 || (ret == -1 && errno == EINTR));
2762
2763 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2764 be awakened anyway. */
2765 }
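/* Editorial sketch: linux_event_pipe implements the classic self-pipe
   trick.  Assuming both ends were created non-blocking, the event
   loop's reader side would look roughly like:

     fd_set readfds;
     FD_ZERO (&readfds);
     FD_SET (linux_event_pipe[0], &readfds);
     if (select (linux_event_pipe[0] + 1, &readfds, NULL, NULL, NULL) > 0)
       {
         // A target event is pending; the loop would now call
         // linux_wait with TARGET_WNOHANG to pull it.
       }

   async_file_flush drains stale bytes so one event is never announced
   twice; async_file_mark writes a single byte to wake the loop.  */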
2766
2767 static ptid_t
2768 linux_wait (ptid_t ptid,
2769 struct target_waitstatus *ourstatus, int target_options)
2770 {
2771 ptid_t event_ptid;
2772
2773 if (debug_threads)
2774 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2775
2776 /* Flush the async file first. */
2777 if (target_is_async_p ())
2778 async_file_flush ();
2779
2780 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2781
2782 /* If at least one stop was reported, there may be more. A single
2783 SIGCHLD can signal more than one child stop. */
2784 if (target_is_async_p ()
2785 && (target_options & TARGET_WNOHANG) != 0
2786 && !ptid_equal (event_ptid, null_ptid))
2787 async_file_mark ();
2788
2789 return event_ptid;
2790 }
2791
2792 /* Send a signal to an LWP. */
2793
2794 static int
2795 kill_lwp (unsigned long lwpid, int signo)
2796 {
2797 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2798 fails, then we are not using nptl threads and we should be using kill. */
2799
2800 #ifdef __NR_tkill
2801 {
2802 static int tkill_failed;
2803
2804 if (!tkill_failed)
2805 {
2806 int ret;
2807
2808 errno = 0;
2809 ret = syscall (__NR_tkill, lwpid, signo);
2810 if (errno != ENOSYS)
2811 return ret;
2812 tkill_failed = 1;
2813 }
2814 }
2815 #endif
2816
2817 return kill (lwpid, signo);
2818 }
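/* Editorial note: tkill(2) targets one kernel task, while kill(2)
   delivers a group-directed signal to the whole thread group.  Under
   NPTL all threads share a PID, so only the raw syscall can direct
   SIGSTOP at one specific thread:

     syscall (__NR_tkill, lwpid, SIGSTOP);  // stop exactly this LWP

   The tkill_failed latch assumes ENOSYS is permanent for the running
   kernel; the kill fallback is only needed on old LinuxThreads
   systems, where each thread had its own PID anyway.  */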
2819
2820 void
2821 linux_stop_lwp (struct lwp_info *lwp)
2822 {
2823 send_sigstop (lwp);
2824 }
2825
2826 static void
2827 send_sigstop (struct lwp_info *lwp)
2828 {
2829 int pid;
2830
2831 pid = lwpid_of (lwp);
2832
2833 /* If we already have a pending stop signal for this LWP, don't
2834 send another. */
2835 if (lwp->stop_expected)
2836 {
2837 if (debug_threads)
2838 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2839
2840 return;
2841 }
2842
2843 if (debug_threads)
2844 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2845
2846 lwp->stop_expected = 1;
2847 kill_lwp (pid, SIGSTOP);
2848 }
2849
2850 static int
2851 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2852 {
2853 struct lwp_info *lwp = (struct lwp_info *) entry;
2854
2855 /* Ignore EXCEPT. */
2856 if (lwp == except)
2857 return 0;
2858
2859 if (lwp->stopped)
2860 return 0;
2861
2862 send_sigstop (lwp);
2863 return 0;
2864 }
2865
2866 /* Increment the suspend count of an LWP, and stop it, if not stopped
2867 yet. */
2868 static int
2869 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2870 void *except)
2871 {
2872 struct lwp_info *lwp = (struct lwp_info *) entry;
2873
2874 /* Ignore EXCEPT. */
2875 if (lwp == except)
2876 return 0;
2877
2878 lwp->suspended++;
2879
2880 return send_sigstop_callback (entry, except);
2881 }
2882
2883 static void
2884 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2885 {
2886 /* It's dead, really. */
2887 lwp->dead = 1;
2888
2889 /* Store the exit status for later. */
2890 lwp->status_pending_p = 1;
2891 lwp->status_pending = wstat;
2892
2893 /* Prevent trying to stop it. */
2894 lwp->stopped = 1;
2895
2896 /* No further stops are expected from a dead lwp. */
2897 lwp->stop_expected = 0;
2898 }
2899
2900 static void
2901 wait_for_sigstop (struct inferior_list_entry *entry)
2902 {
2903 struct lwp_info *lwp = (struct lwp_info *) entry;
2904 struct thread_info *saved_inferior;
2905 int wstat;
2906 ptid_t saved_tid;
2907 ptid_t ptid;
2908 int pid;
2909
2910 if (lwp->stopped)
2911 {
2912 if (debug_threads)
2913 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2914 lwpid_of (lwp));
2915 return;
2916 }
2917
2918 saved_inferior = current_inferior;
2919 if (saved_inferior != NULL)
2920 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2921 else
2922 saved_tid = null_ptid; /* avoid bogus unused warning */
2923
2924 ptid = lwp->head.id;
2925
2926 if (debug_threads)
2927 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2928
2929 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2930
2931 /* If we stopped with a non-SIGSTOP signal, save it for later
2932 and record the pending SIGSTOP. If the process exited, just
2933 return. */
2934 if (WIFSTOPPED (wstat))
2935 {
2936 if (debug_threads)
2937 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2938 lwpid_of (lwp), WSTOPSIG (wstat));
2939
2940 if (WSTOPSIG (wstat) != SIGSTOP)
2941 {
2942 if (debug_threads)
2943 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2944 lwpid_of (lwp), wstat);
2945
2946 lwp->status_pending_p = 1;
2947 lwp->status_pending = wstat;
2948 }
2949 }
2950 else
2951 {
2952 if (debug_threads)
2953 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2954
2955 lwp = find_lwp_pid (pid_to_ptid (pid));
2956 if (lwp)
2957 {
2958 /* Leave this status pending for the next time we're able to
2959 report it. In the meantime, we'll report this lwp as
2960 dead to GDB, so GDB doesn't try to read registers and
2961 memory from it. This can only happen if this was the
2962 last thread of the process; otherwise, PID is removed
2963 from the thread tables before linux_wait_for_event
2964 returns. */
2965 mark_lwp_dead (lwp, wstat);
2966 }
2967 }
2968
2969 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2970 current_inferior = saved_inferior;
2971 else
2972 {
2973 if (debug_threads)
2974 fprintf (stderr, "Previously current thread died.\n");
2975
2976 if (non_stop)
2977 {
2978 /* We can't change the current inferior behind GDB's back,
2979 otherwise, a subsequent command may apply to the wrong
2980 process. */
2981 current_inferior = NULL;
2982 }
2983 else
2984 {
2985 /* Set a valid thread as current. */
2986 set_desired_inferior (0);
2987 }
2988 }
2989 }
2990
2991 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2992 move it out, because we need to report the stop event to GDB. For
2993 example, if the user puts a breakpoint in the jump pad, it's
2994 because she wants to debug it. */
2995
2996 static int
2997 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2998 {
2999 struct lwp_info *lwp = (struct lwp_info *) entry;
3000 struct thread_info *thread = get_lwp_thread (lwp);
3001
3002 gdb_assert (lwp->suspended == 0);
3003 gdb_assert (lwp->stopped);
3004
3005 /* Allow debugging the jump pad, gdb_collect, etc. */
3006 return (supports_fast_tracepoints ()
3007 && agent_loaded_p ()
3008 && (gdb_breakpoint_here (lwp->stop_pc)
3009 || lwp->stopped_by_watchpoint
3010 || thread->last_resume_kind == resume_step)
3011 && linux_fast_tracepoint_collecting (lwp, NULL));
3012 }
3013
3014 static void
3015 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3016 {
3017 struct lwp_info *lwp = (struct lwp_info *) entry;
3018 struct thread_info *thread = get_lwp_thread (lwp);
3019 int *wstat;
3020
3021 gdb_assert (lwp->suspended == 0);
3022 gdb_assert (lwp->stopped);
3023
3024 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3025
3026 /* Allow debugging the jump pad, gdb_collect, etc. */
3027 if (!gdb_breakpoint_here (lwp->stop_pc)
3028 && !lwp->stopped_by_watchpoint
3029 && thread->last_resume_kind != resume_step
3030 && maybe_move_out_of_jump_pad (lwp, wstat))
3031 {
3032 if (debug_threads)
3033 fprintf (stderr,
3034 "LWP %ld needs stabilizing (in jump pad)\n",
3035 lwpid_of (lwp));
3036
3037 if (wstat)
3038 {
3039 lwp->status_pending_p = 0;
3040 enqueue_one_deferred_signal (lwp, wstat);
3041
3042 if (debug_threads)
3043 fprintf (stderr,
3044 "Signal %d for LWP %ld deferred "
3045 "(in jump pad)\n",
3046 WSTOPSIG (*wstat), lwpid_of (lwp));
3047 }
3048
3049 linux_resume_one_lwp (lwp, 0, 0, NULL);
3050 }
3051 else
3052 lwp->suspended++;
3053 }
3054
3055 static int
3056 lwp_running (struct inferior_list_entry *entry, void *data)
3057 {
3058 struct lwp_info *lwp = (struct lwp_info *) entry;
3059
3060 if (lwp->dead)
3061 return 0;
3062 if (lwp->stopped)
3063 return 0;
3064 return 1;
3065 }
3066
3067 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3068 If SUSPEND, then also increase the suspend count of every LWP,
3069 except EXCEPT. */
3070
3071 static void
3072 stop_all_lwps (int suspend, struct lwp_info *except)
3073 {
3074 /* Should not be called recursively. */
3075 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3076
3077 stopping_threads = (suspend
3078 ? STOPPING_AND_SUSPENDING_THREADS
3079 : STOPPING_THREADS);
3080
3081 if (suspend)
3082 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3083 else
3084 find_inferior (&all_lwps, send_sigstop_callback, except);
3085 for_each_inferior (&all_lwps, wait_for_sigstop);
3086 stopping_threads = NOT_STOPPING_THREADS;
3087 }
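/* Editorial sketch of how this function is paired elsewhere in this
   file (the delete_breakpoint argument is elided for illustration):

     stop_all_lwps (0, NULL);            // all-stop: just stop everyone
     ...
     stop_all_lwps (1, event_child);     // stop AND suspend the others
     delete_breakpoint (...);            // safe: nothing else can run
     unstop_all_lwps (1, event_child);   // resume and unsuspend

   Passing SUSPEND=1 lets nested stop/unstop pairs balance through the
   per-LWP `suspended' counts.  */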
3088
3089 /* Resume execution of the inferior process.
3090 If STEP is nonzero, single-step it.
3091 If SIGNAL is nonzero, give it that signal. */
3092
3093 static void
3094 linux_resume_one_lwp (struct lwp_info *lwp,
3095 int step, int signal, siginfo_t *info)
3096 {
3097 struct thread_info *saved_inferior;
3098 int fast_tp_collecting;
3099
3100 if (lwp->stopped == 0)
3101 return;
3102
3103 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3104
3105 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3106
3107 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3108 user used the "jump" command, or "set $pc = foo"). */
3109 if (lwp->stop_pc != get_pc (lwp))
3110 {
3111 /* Collecting 'while-stepping' actions doesn't make sense
3112 anymore. */
3113 release_while_stepping_state_list (get_lwp_thread (lwp));
3114 }
3115
3116 /* If we have pending signals or status, and a new signal, enqueue the
3117 signal. Also enqueue the signal if we are waiting to reinsert a
3118 breakpoint; it will be picked up again below. */
3119 if (signal != 0
3120 && (lwp->status_pending_p
3121 || lwp->pending_signals != NULL
3122 || lwp->bp_reinsert != 0
3123 || fast_tp_collecting))
3124 {
3125 struct pending_signals *p_sig;
3126 p_sig = xmalloc (sizeof (*p_sig));
3127 p_sig->prev = lwp->pending_signals;
3128 p_sig->signal = signal;
3129 if (info == NULL)
3130 memset (&p_sig->info, 0, sizeof (siginfo_t));
3131 else
3132 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3133 lwp->pending_signals = p_sig;
3134 }
3135
3136 if (lwp->status_pending_p)
3137 {
3138 if (debug_threads)
3139 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3140 " has pending status\n",
3141 lwpid_of (lwp), step ? "step" : "continue", signal,
3142 lwp->stop_expected ? "expected" : "not expected");
3143 return;
3144 }
3145
3146 saved_inferior = current_inferior;
3147 current_inferior = get_lwp_thread (lwp);
3148
3149 if (debug_threads)
3150 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3151 lwpid_of (lwp), step ? "step" : "continue", signal,
3152 lwp->stop_expected ? "expected" : "not expected");
3153
3154 /* This bit needs some thinking about. If we get a signal that
3155 we must report while a single-step reinsert is still pending,
3156 we often end up resuming the thread. It might be better to
3157 (ew) allow a stack of pending events; then we could be sure that
3158 the reinsert happened right away, and we would not lose any signals.
3159
3160 Making this stack would also shrink the window in which breakpoints are
3161 uninserted (see comment in linux_wait_for_lwp) but not enough for
3162 complete correctness, so it won't solve that problem. It may be
3163 worthwhile just to solve this one, however. */
3164 if (lwp->bp_reinsert != 0)
3165 {
3166 if (debug_threads)
3167 fprintf (stderr, " pending reinsert at 0x%s\n",
3168 paddress (lwp->bp_reinsert));
3169
3170 if (can_hardware_single_step ())
3171 {
3172 if (fast_tp_collecting == 0)
3173 {
3174 if (step == 0)
3175 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3176 if (lwp->suspended)
3177 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3178 lwp->suspended);
3179 }
3180
3181 step = 1;
3182 }
3183
3184 /* Postpone any pending signal. It was enqueued above. */
3185 signal = 0;
3186 }
3187
3188 if (fast_tp_collecting == 1)
3189 {
3190 if (debug_threads)
3191 fprintf (stderr, "\
3192 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3193 lwpid_of (lwp));
3194
3195 /* Postpone any pending signal. It was enqueued above. */
3196 signal = 0;
3197 }
3198 else if (fast_tp_collecting == 2)
3199 {
3200 if (debug_threads)
3201 fprintf (stderr, "\
3202 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3203 lwpid_of (lwp));
3204
3205 if (can_hardware_single_step ())
3206 step = 1;
3207 else
3208 fatal ("moving out of jump pad single-stepping"
3209 " not implemented on this target");
3210
3211 /* Postpone any pending signal. It was enqueued above. */
3212 signal = 0;
3213 }
3214
3215 /* If we have while-stepping actions in this thread, set it stepping.
3216 If we have a signal to deliver, it may or may not be set to
3217 SIG_IGN, we don't know. Assume so, and allow collecting
3218 while-stepping into a signal handler. A possible smart thing to
3219 do would be to set an internal breakpoint at the signal return
3220 address, continue, and carry on catching this while-stepping
3221 action only when that breakpoint is hit. A future
3222 enhancement. */
3223 if (get_lwp_thread (lwp)->while_stepping != NULL
3224 && can_hardware_single_step ())
3225 {
3226 if (debug_threads)
3227 fprintf (stderr,
3228 "lwp %ld has a while-stepping action -> forcing step.\n",
3229 lwpid_of (lwp));
3230 step = 1;
3231 }
3232
3233 if (debug_threads && the_low_target.get_pc != NULL)
3234 {
3235 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3236 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3237 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3238 }
3239
3240 /* If we have pending signals, consume one unless we are trying to
3241 reinsert a breakpoint or we're trying to finish a fast tracepoint
3242 collect. */
3243 if (lwp->pending_signals != NULL
3244 && lwp->bp_reinsert == 0
3245 && fast_tp_collecting == 0)
3246 {
3247 struct pending_signals **p_sig;
3248
3249 p_sig = &lwp->pending_signals;
3250 while ((*p_sig)->prev != NULL)
3251 p_sig = &(*p_sig)->prev;
3252
3253 signal = (*p_sig)->signal;
3254 if ((*p_sig)->info.si_signo != 0)
3255 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3256 &(*p_sig)->info);
3257
3258 free (*p_sig);
3259 *p_sig = NULL;
3260 }
3261
3262 if (the_low_target.prepare_to_resume != NULL)
3263 the_low_target.prepare_to_resume (lwp);
3264
3265 regcache_invalidate_thread (get_lwp_thread (lwp));
3266 errno = 0;
3267 lwp->stopped = 0;
3268 lwp->stopped_by_watchpoint = 0;
3269 lwp->stepping = step;
3270 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3271 (PTRACE_TYPE_ARG3) 0,
3272 /* Coerce to a uintptr_t first to avoid potential gcc warning
3273 of coercing an 8 byte integer to a 4 byte pointer. */
3274 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3275
3276 current_inferior = saved_inferior;
3277 if (errno)
3278 {
3279 /* ESRCH from ptrace either means that the thread was already
3280 running (an error) or that it is gone (a race condition). If
3281 it's gone, we will get a notification the next time we wait,
3282 so we can ignore the error. We could differentiate these
3283 two, but it's tricky without waiting; the thread still exists
3284 as a zombie, so sending it signal 0 would succeed. So just
3285 ignore ESRCH. */
3286 if (errno == ESRCH)
3287 return;
3288
3289 perror_with_name ("ptrace");
3290 }
3291 }
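/* Editorial note on the queue discipline above: lwp->pending_signals
   is pushed like a stack (newest at the head, linked via `prev'), but
   the consume loop walks to the end of the chain, so delivery is
   oldest-first.  After enqueuing signal A and then signal B:

     lwp->pending_signals --> [B] --prev--> [A] --prev--> NULL
                                            (dequeued first)

   i.e. a FIFO implemented on a singly linked LIFO list.  */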
3292
3293 struct thread_resume_array
3294 {
3295 struct thread_resume *resume;
3296 size_t n;
3297 };
3298
3299 /* This function is called once per thread. We look up the thread
3300 in the thread_resume_array passed via ARG, and mark the thread
3301 with a pointer to the appropriate resume request.
3302
3303 This algorithm is O(threads * resume elements), but resume elements
3304 is small (and will remain small at least until GDB supports thread
3305 suspension). */
3306 static int
3307 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3308 {
3309 struct lwp_info *lwp;
3310 struct thread_info *thread;
3311 int ndx;
3312 struct thread_resume_array *r;
3313
3314 thread = (struct thread_info *) entry;
3315 lwp = get_thread_lwp (thread);
3316 r = arg;
3317
3318 for (ndx = 0; ndx < r->n; ndx++)
3319 {
3320 ptid_t ptid = r->resume[ndx].thread;
3321 if (ptid_equal (ptid, minus_one_ptid)
3322 || ptid_equal (ptid, entry->id)
3323 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3324 of PID'. */
3325 || (ptid_get_pid (ptid) == pid_of (lwp)
3326 && (ptid_is_pid (ptid)
3327 || ptid_get_lwp (ptid) == -1)))
3328 {
3329 if (r->resume[ndx].kind == resume_stop
3330 && thread->last_resume_kind == resume_stop)
3331 {
3332 if (debug_threads)
3333 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3334 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3335 ? "stopped"
3336 : "stopping",
3337 lwpid_of (lwp));
3338
3339 continue;
3340 }
3341
3342 lwp->resume = &r->resume[ndx];
3343 thread->last_resume_kind = lwp->resume->kind;
3344
3345 lwp->step_range_start = lwp->resume->step_range_start;
3346 lwp->step_range_end = lwp->resume->step_range_end;
3347
3348 /* If we had a deferred signal to report, dequeue one now.
3349 This can happen if LWP gets more than one signal while
3350 trying to get out of a jump pad. */
3351 if (lwp->stopped
3352 && !lwp->status_pending_p
3353 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3354 {
3355 lwp->status_pending_p = 1;
3356
3357 if (debug_threads)
3358 fprintf (stderr,
3359 "Dequeueing deferred signal %d for LWP %ld, "
3360 "leaving status pending.\n",
3361 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3362 }
3363
3364 return 0;
3365 }
3366 }
3367
3368 /* No resume action for this thread. */
3369 lwp->resume = NULL;
3370
3371 return 0;
3372 }
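/* Editorial sketch: three request forms match an LWP above.  For a
   hypothetical LWP with pid 1234 and lwpid 1235:

     ptid_t any   = minus_one_ptid;              // resume everything
     ptid_t exact = ptid_build (1234, 1235, 0);  // this LWP only
     ptid_t proc  = pid_to_ptid (1234);          // all of pid 1234
                                                 // ("pPID"/"pPID.-1")

   Any other ptid leaves lwp->resume NULL, so the thread keeps its
   current state when linux_resume_one_thread runs.  */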
3373
3374
3375 /* Set *FLAG_P if this lwp has an interesting status pending. */
3376 static int
3377 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3378 {
3379 struct lwp_info *lwp = (struct lwp_info *) entry;
3380
3381 /* LWPs which will not be resumed are not interesting, because
3382 we might not wait for them next time through linux_wait. */
3383 if (lwp->resume == NULL)
3384 return 0;
3385
3386 if (lwp->status_pending_p)
3387 * (int *) flag_p = 1;
3388
3389 return 0;
3390 }
3391
3392 /* Return 1 if this lwp that GDB wants running is stopped at an
3393 internal breakpoint that we need to step over. It assumes that any
3394 required STOP_PC adjustment has already been propagated to the
3395 inferior's regcache. */
3396
3397 static int
3398 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3399 {
3400 struct lwp_info *lwp = (struct lwp_info *) entry;
3401 struct thread_info *thread;
3402 struct thread_info *saved_inferior;
3403 CORE_ADDR pc;
3404
3405 /* LWPs which will not be resumed are not interesting, because we
3406 might not wait for them next time through linux_wait. */
3407
3408 if (!lwp->stopped)
3409 {
3410 if (debug_threads)
3411 fprintf (stderr,
3412 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3413 lwpid_of (lwp));
3414 return 0;
3415 }
3416
3417 thread = get_lwp_thread (lwp);
3418
3419 if (thread->last_resume_kind == resume_stop)
3420 {
3421 if (debug_threads)
3422 fprintf (stderr,
3423 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3424 lwpid_of (lwp));
3425 return 0;
3426 }
3427
3428 gdb_assert (lwp->suspended >= 0);
3429
3430 if (lwp->suspended)
3431 {
3432 if (debug_threads)
3433 fprintf (stderr,
3434 "Need step over [LWP %ld]? Ignoring, suspended\n",
3435 lwpid_of (lwp));
3436 return 0;
3437 }
3438
3439 if (!lwp->need_step_over)
3440 {
3441 if (debug_threads)
3442 fprintf (stderr,
3443 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3444 }
3445
3446 if (lwp->status_pending_p)
3447 {
3448 if (debug_threads)
3449 fprintf (stderr,
3450 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3451 lwpid_of (lwp));
3452 return 0;
3453 }
3454
3455 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3456 or we have. */
3457 pc = get_pc (lwp);
3458
3459 /* If the PC has changed since we stopped, then don't do anything,
3460 and let the breakpoint/tracepoint be hit. This happens if, for
3461 instance, GDB handled the decr_pc_after_break subtraction itself,
3462 GDB is OOL stepping this thread, or the user has issued a "jump"
3463 command, or poked the thread's registers herself. */
3464 if (pc != lwp->stop_pc)
3465 {
3466 if (debug_threads)
3467 fprintf (stderr,
3468 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3469 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3470 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3471
3472 lwp->need_step_over = 0;
3473 return 0;
3474 }
3475
3476 saved_inferior = current_inferior;
3477 current_inferior = thread;
3478
3479 /* We can only step over breakpoints we know about. */
3480 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3481 {
3482 /* Don't step over a breakpoint that GDB expects to hit,
3483 though. If the condition is being evaluated on the target's side
3484 and it evaluates to false, step over this breakpoint as well. */
3485 if (gdb_breakpoint_here (pc)
3486 && gdb_condition_true_at_breakpoint (pc)
3487 && gdb_no_commands_at_breakpoint (pc))
3488 {
3489 if (debug_threads)
3490 fprintf (stderr,
3491 "Need step over [LWP %ld]? yes, but found"
3492 " GDB breakpoint at 0x%s; skipping step over\n",
3493 lwpid_of (lwp), paddress (pc));
3494
3495 current_inferior = saved_inferior;
3496 return 0;
3497 }
3498 else
3499 {
3500 if (debug_threads)
3501 fprintf (stderr,
3502 "Need step over [LWP %ld]? yes, "
3503 "found breakpoint at 0x%s\n",
3504 lwpid_of (lwp), paddress (pc));
3505
3506 /* We've found an lwp that needs stepping over --- return 1 so
3507 that find_inferior stops looking. */
3508 current_inferior = saved_inferior;
3509
3510 /* If the step over is cancelled, this is set again. */
3511 lwp->need_step_over = 0;
3512 return 1;
3513 }
3514 }
3515
3516 current_inferior = saved_inferior;
3517
3518 if (debug_threads)
3519 fprintf (stderr,
3520 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3521 lwpid_of (lwp), paddress (pc));
3522
3523 return 0;
3524 }
3525
3526 /* Start a step-over operation on LWP. When LWP is stopped at a
3527 breakpoint, to make progress, we need to move the breakpoint out
3528 of the way. If we let other threads run while we do that, they may
3529 pass by the breakpoint location and miss hitting it. To avoid
3530 that, a step-over momentarily stops all threads while LWP is
3531 single-stepped with the breakpoint temporarily uninserted from
3532 the inferior. When the single-step finishes, we reinsert the
3533 breakpoint, and let all threads that are supposed to be running
3534 run again.
3535
3536 On targets that don't support hardware single-step, we don't
3537 currently support full software single-stepping. Instead, we only
3538 support stepping over the thread event breakpoint, by asking the
3539 low target where to place a reinsert breakpoint. Since this
3540 routine assumes the breakpoint being stepped over is a thread
3541 event breakpoint, the return address of the current function is
3542 usually a good enough place to set the reinsert breakpoint. */
3543
3544 static int
3545 start_step_over (struct lwp_info *lwp)
3546 {
3547 struct thread_info *saved_inferior;
3548 CORE_ADDR pc;
3549 int step;
3550
3551 if (debug_threads)
3552 fprintf (stderr,
3553 "Starting step-over on LWP %ld. Stopping all threads\n",
3554 lwpid_of (lwp));
3555
3556 stop_all_lwps (1, lwp);
3557 gdb_assert (lwp->suspended == 0);
3558
3559 if (debug_threads)
3560 fprintf (stderr, "Done stopping all threads for step-over.\n");
3561
3562 /* Note, we should always reach here with an already adjusted PC,
3563 either by GDB (if we're resuming due to GDB's request), or by our
3564 caller, if we just finished handling an internal breakpoint GDB
3565 shouldn't care about. */
3566 pc = get_pc (lwp);
3567
3568 saved_inferior = current_inferior;
3569 current_inferior = get_lwp_thread (lwp);
3570
3571 lwp->bp_reinsert = pc;
3572 uninsert_breakpoints_at (pc);
3573 uninsert_fast_tracepoint_jumps_at (pc);
3574
3575 if (can_hardware_single_step ())
3576 {
3577 step = 1;
3578 }
3579 else
3580 {
3581 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3582 set_reinsert_breakpoint (raddr);
3583 step = 0;
3584 }
3585
3586 current_inferior = saved_inferior;
3587
3588 linux_resume_one_lwp (lwp, step, 0, NULL);
3589
3590 /* Require next event from this LWP. */
3591 step_over_bkpt = lwp->head.id;
3592 return 1;
3593 }
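/* Editorial summary of the full step-over protocol implemented by
   start_step_over and finish_step_over below:

     stop_all_lwps (1, lwp);            // pause and suspend the others
     lwp->bp_reinsert = pc;             // remember what to put back
     uninsert_breakpoints_at (pc);      // lift the breakpoint
     linux_resume_one_lwp (lwp, step, 0, NULL);  // step just this LWP
     // ... the wait loop reports the step_over_bkpt event ...
     finish_step_over (lwp);            // reinsert, clear bp_reinsert
     unsuspend_all_lwps (lwp);          // rebalance suspend counts

   In all-stop the threads then stay stopped; in non-stop the caller
   additionally unstops them (see linux_wait_1).  */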
3594
3595 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3596 start_step_over, if still there, and delete any reinsert
3597 breakpoints we've set, on targets without hardware single-step. */
3598
3599 static int
3600 finish_step_over (struct lwp_info *lwp)
3601 {
3602 if (lwp->bp_reinsert != 0)
3603 {
3604 if (debug_threads)
3605 fprintf (stderr, "Finished step over.\n");
3606
3607 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3608 may be no breakpoint to reinsert there by now. */
3609 reinsert_breakpoints_at (lwp->bp_reinsert);
3610 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3611
3612 lwp->bp_reinsert = 0;
3613
3614 /* Delete any software-single-step reinsert breakpoints. No
3615 longer needed. We don't have to worry about other threads
3616 hitting this trap, and later not being able to explain it,
3617 because we were stepping over a breakpoint, and we hold all
3618 threads but LWP stopped while doing that. */
3619 if (!can_hardware_single_step ())
3620 delete_reinsert_breakpoints ();
3621
3622 step_over_bkpt = null_ptid;
3623 return 1;
3624 }
3625 else
3626 return 0;
3627 }
3628
3629 /* This function is called once per thread. We check the thread's resume
3630 request, which will tell us whether to resume, step, or leave the thread
3631 stopped; and what signal, if any, it should be sent.
3632
3633 For threads which we aren't explicitly told otherwise, we preserve
3634 the stepping flag; this is used for stepping over gdbserver-placed
3635 breakpoints.
3636
3637 If pending_flags was set in any thread, we queue any needed
3638 signals, since we won't actually resume. We already have a pending
3639 event to report, so we don't need to preserve any step requests;
3640 they should be re-issued if necessary. */
3641
3642 static int
3643 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3644 {
3645 struct lwp_info *lwp;
3646 struct thread_info *thread;
3647 int step;
3648 int leave_all_stopped = * (int *) arg;
3649 int leave_pending;
3650
3651 thread = (struct thread_info *) entry;
3652 lwp = get_thread_lwp (thread);
3653
3654 if (lwp->resume == NULL)
3655 return 0;
3656
3657 if (lwp->resume->kind == resume_stop)
3658 {
3659 if (debug_threads)
3660 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3661
3662 if (!lwp->stopped)
3663 {
3664 if (debug_threads)
3665 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3666
3667 /* Stop the thread, and wait for the event asynchronously,
3668 through the event loop. */
3669 send_sigstop (lwp);
3670 }
3671 else
3672 {
3673 if (debug_threads)
3674 fprintf (stderr, "already stopped LWP %ld\n",
3675 lwpid_of (lwp));
3676
3677 /* The LWP may have been stopped in an internal event that
3678 was not meant to be notified back to GDB (e.g., gdbserver
3679 breakpoint), so we should be reporting a stop event in
3680 this case too. */
3681
3682 /* If the thread already has a pending SIGSTOP, this is a
3683 no-op. Otherwise, something later will presumably resume
3684 the thread and this will cause it to cancel any pending
3685 operation, due to last_resume_kind == resume_stop. If
3686 the thread already has a pending status to report, we
3687 will still report it the next time we wait - see
3688 status_pending_p_callback. */
3689
3690 /* If we already have a pending signal to report, then
3691 there's no need to queue a SIGSTOP, as this means we're
3692 midway through moving the LWP out of the jumppad, and we
3693 will report the pending signal as soon as that is
3694 finished. */
3695 if (lwp->pending_signals_to_report == NULL)
3696 send_sigstop (lwp);
3697 }
3698
3699 /* For stop requests, we're done. */
3700 lwp->resume = NULL;
3701 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3702 return 0;
3703 }
3704
3705 /* If this thread, which is about to be resumed, has a pending status,
3706 then don't resume any threads - we can just report the pending
3707 status. Make sure to queue any signals that would otherwise be
3708 sent. In all-stop mode, we make this decision based on whether
3709 *any* thread has a pending status. If there's a thread that needs
3710 the step-over-breakpoint dance, then don't resume any other thread
3711 but that particular one. */
3712 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3713
3714 if (!leave_pending)
3715 {
3716 if (debug_threads)
3717 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3718
3719 step = (lwp->resume->kind == resume_step);
3720 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3721 }
3722 else
3723 {
3724 if (debug_threads)
3725 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3726
3727 /* If we have a new signal, enqueue the signal. */
3728 if (lwp->resume->sig != 0)
3729 {
3730 struct pending_signals *p_sig;
3731 p_sig = xmalloc (sizeof (*p_sig));
3732 p_sig->prev = lwp->pending_signals;
3733 p_sig->signal = lwp->resume->sig;
3734 memset (&p_sig->info, 0, sizeof (siginfo_t));
3735
3736 /* If this is the same signal we were previously stopped by,
3737 make sure to queue its siginfo. We can ignore the return
3738 value of ptrace; if it fails, we'll skip
3739 PTRACE_SETSIGINFO. */
3740 if (WIFSTOPPED (lwp->last_status)
3741 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3742 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3743 &p_sig->info);
3744
3745 lwp->pending_signals = p_sig;
3746 }
3747 }
3748
3749 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3750 lwp->resume = NULL;
3751 return 0;
3752 }
3753
3754 static void
3755 linux_resume (struct thread_resume *resume_info, size_t n)
3756 {
3757 struct thread_resume_array array = { resume_info, n };
3758 struct lwp_info *need_step_over = NULL;
3759 int any_pending;
3760 int leave_all_stopped;
3761
3762 find_inferior (&all_threads, linux_set_resume_request, &array);
3763
3764 /* If there is a thread which would otherwise be resumed, which has
3765 a pending status, then don't resume any threads - we can just
3766 report the pending status. Make sure to queue any signals that
3767 would otherwise be sent. In non-stop mode, we'll apply this
3768 logic to each thread individually. We consume all pending events
3769 before considering to start a step-over (in all-stop). */
3770 any_pending = 0;
3771 if (!non_stop)
3772 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3773
3774 /* If there is a thread which would otherwise be resumed, which is
3775 stopped at a breakpoint that needs stepping over, then don't
3776 resume any threads - have it step over the breakpoint with all
3777 other threads stopped, then resume all threads again. Make sure
3778 to queue any signals that would otherwise be delivered or
3779 queued. */
3780 if (!any_pending && supports_breakpoints ())
3781 need_step_over
3782 = (struct lwp_info *) find_inferior (&all_lwps,
3783 need_step_over_p, NULL);
3784
3785 leave_all_stopped = (need_step_over != NULL || any_pending);
3786
3787 if (debug_threads)
3788 {
3789 if (need_step_over != NULL)
3790 fprintf (stderr, "Not resuming all, need step over\n");
3791 else if (any_pending)
3792 fprintf (stderr,
3793 "Not resuming, all-stop and found "
3794 "an LWP with pending status\n");
3795 else
3796 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3797 }
3798
3799 /* Even if we're leaving threads stopped, queue all signals we'd
3800 otherwise deliver. */
3801 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3802
3803 if (need_step_over)
3804 start_step_over (need_step_over);
3805 }
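
/* An illustrative walk-through of the decision above (not part of
   the original sources): suppose LWP 100 sits on a gdbserver
   breakpoint that needs stepping over while GDB asks to continue
   LWP 101. need_step_over_p selects LWP 100, leave_all_stopped
   becomes true, linux_resume_one_thread then only queues LWP 101's
   signals instead of resuming it, and finally start_step_over
   single-steps LWP 100 alone. Once the step-over finishes,
   proceed_all_lwps below sets the remaining threads running. */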
3806
3807 /* This function is called once per thread. We check the thread's
3808 last resume request, which will tell us whether to resume, step, or
3809 leave the thread stopped. Any signal the client requested to be
3810 delivered has already been enqueued at this point.
3811
3812 If any thread that GDB wants running is stopped at an internal
3813 breakpoint that needs stepping over, we start a step-over operation
3814 on that particular thread, and leave all others stopped. */
3815
3816 static int
3817 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3818 {
3819 struct lwp_info *lwp = (struct lwp_info *) entry;
3820 struct thread_info *thread;
3821 int step;
3822
3823 if (lwp == except)
3824 return 0;
3825
3826 if (debug_threads)
3827 fprintf (stderr,
3828 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3829
3830 if (!lwp->stopped)
3831 {
3832 if (debug_threads)
3833 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3834 return 0;
3835 }
3836
3837 thread = get_lwp_thread (lwp);
3838
3839 if (thread->last_resume_kind == resume_stop
3840 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3841 {
3842 if (debug_threads)
fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3844 lwpid_of (lwp));
3845 return 0;
3846 }
3847
3848 if (lwp->status_pending_p)
3849 {
3850 if (debug_threads)
3851 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3852 lwpid_of (lwp));
3853 return 0;
3854 }
3855
3856 gdb_assert (lwp->suspended >= 0);
3857
3858 if (lwp->suspended)
3859 {
3860 if (debug_threads)
3861 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3862 return 0;
3863 }
3864
3865 if (thread->last_resume_kind == resume_stop
3866 && lwp->pending_signals_to_report == NULL
3867 && lwp->collecting_fast_tracepoint == 0)
3868 {
3869 /* We haven't reported this LWP as stopped yet (otherwise, the
3870 last_status.kind check above would catch it, and we wouldn't
reach here). This LWP may have been momentarily paused by a
3872 stop_all_lwps call while handling for example, another LWP's
3873 step-over. In that case, the pending expected SIGSTOP signal
3874 that was queued at vCont;t handling time will have already
3875 been consumed by wait_for_sigstop, and so we need to requeue
3876 another one here. Note that if the LWP already has a SIGSTOP
3877 pending, this is a no-op. */
3878
3879 if (debug_threads)
3880 fprintf (stderr,
3881 "Client wants LWP %ld to stop. "
3882 "Making sure it has a SIGSTOP pending\n",
3883 lwpid_of (lwp));
3884
3885 send_sigstop (lwp);
3886 }
3887
3888 step = thread->last_resume_kind == resume_step;
3889 linux_resume_one_lwp (lwp, step, 0, NULL);
3890 return 0;
3891 }
3892
3893 static int
3894 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3895 {
3896 struct lwp_info *lwp = (struct lwp_info *) entry;
3897
3898 if (lwp == except)
3899 return 0;
3900
3901 lwp->suspended--;
3902 gdb_assert (lwp->suspended >= 0);
3903
3904 return proceed_one_lwp (entry, except);
3905 }
3906
3907 /* When we finish a step-over, set threads running again. If there's
3908 another thread that may need a step-over, now's the time to start
3909 it. Eventually, we'll move all threads past their breakpoints. */
3910
3911 static void
3912 proceed_all_lwps (void)
3913 {
3914 struct lwp_info *need_step_over;
3915
3916 /* If there is a thread which would otherwise be resumed, which is
3917 stopped at a breakpoint that needs stepping over, then don't
3918 resume any threads - have it step over the breakpoint with all
3919 other threads stopped, then resume all threads again. */
3920
3921 if (supports_breakpoints ())
3922 {
3923 need_step_over
3924 = (struct lwp_info *) find_inferior (&all_lwps,
3925 need_step_over_p, NULL);
3926
3927 if (need_step_over != NULL)
3928 {
3929 if (debug_threads)
3930 fprintf (stderr, "proceed_all_lwps: found "
3931 "thread %ld needing a step-over\n",
3932 lwpid_of (need_step_over));
3933
3934 start_step_over (need_step_over);
3935 return;
3936 }
3937 }
3938
3939 if (debug_threads)
3940 fprintf (stderr, "Proceeding, no step-over needed\n");
3941
3942 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3943 }
3944
/* Stopped LWPs that the client wanted to be running and that don't
have pending statuses are set to run again, except for EXCEPT, if
not NULL. This undoes a stop_all_lwps call. */
3948
3949 static void
3950 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3951 {
3952 if (debug_threads)
3953 {
3954 if (except)
3955 fprintf (stderr,
3956 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3957 else
3958 fprintf (stderr,
3959 "unstopping all lwps\n");
3960 }
3961
3962 if (unsuspend)
3963 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3964 else
3965 find_inferior (&all_lwps, proceed_one_lwp, except);
3966 }
3967
3968
3969 #ifdef HAVE_LINUX_REGSETS
3970
3971 #define use_linux_regsets 1
3972
3973 /* Returns true if REGSET has been disabled. */
3974
3975 static int
3976 regset_disabled (struct regsets_info *info, struct regset_info *regset)
3977 {
3978 return (info->disabled_regsets != NULL
3979 && info->disabled_regsets[regset - info->regsets]);
3980 }
3981
3982 /* Disable REGSET. */
3983
3984 static void
3985 disable_regset (struct regsets_info *info, struct regset_info *regset)
3986 {
3987 int dr_offset;
3988
3989 dr_offset = regset - info->regsets;
3990 if (info->disabled_regsets == NULL)
3991 info->disabled_regsets = xcalloc (1, info->num_regsets);
3992 info->disabled_regsets[dr_offset] = 1;
3993 }
3994
3995 static int
3996 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
3997 struct regcache *regcache)
3998 {
3999 struct regset_info *regset;
4000 int saw_general_regs = 0;
4001 int pid;
4002 struct iovec iov;
4003
4004 regset = regsets_info->regsets;
4005
4006 pid = lwpid_of (get_thread_lwp (current_inferior));
4007 while (regset->size >= 0)
4008 {
4009 void *buf, *data;
4010 int nt_type, res;
4011
4012 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4013 {
4014 regset ++;
4015 continue;
4016 }
4017
4018 buf = xmalloc (regset->size);
4019
4020 nt_type = regset->nt_type;
4021 if (nt_type)
4022 {
4023 iov.iov_base = buf;
4024 iov.iov_len = regset->size;
4025 data = (void *) &iov;
4026 }
4027 else
4028 data = buf;
4029
4030 #ifndef __sparc__
4031 res = ptrace (regset->get_request, pid,
4032 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4033 #else
4034 res = ptrace (regset->get_request, pid, data, nt_type);
4035 #endif
if (res < 0)
{
if (errno == EIO)
{
/* If we get EIO on a regset, do not try it again for
this process mode. */
disable_regset (regsets_info, regset);
free (buf);
continue;
}
else
{
char s[256];
sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
pid);
perror (s);
}
}
else
{
if (regset->type == GENERAL_REGS)
saw_general_regs = 1;
/* Only supply the regcache from BUF when the fetch succeeded;
on failure BUF holds uninitialized memory. */
regset->store_function (regcache, buf);
}
regset ++;
free (buf);
4059 }
4060 if (saw_general_regs)
4061 return 0;
4062 else
4063 return 1;
4064 }
4065
4066 static int
4067 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4068 struct regcache *regcache)
4069 {
4070 struct regset_info *regset;
4071 int saw_general_regs = 0;
4072 int pid;
4073 struct iovec iov;
4074
4075 regset = regsets_info->regsets;
4076
4077 pid = lwpid_of (get_thread_lwp (current_inferior));
4078 while (regset->size >= 0)
4079 {
4080 void *buf, *data;
4081 int nt_type, res;
4082
4083 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4084 {
4085 regset ++;
4086 continue;
4087 }
4088
4089 buf = xmalloc (regset->size);
4090
4091 /* First fill the buffer with the current register set contents,
4092 in case there are any items in the kernel's regset that are
4093 not in gdbserver's regcache. */
4094
4095 nt_type = regset->nt_type;
4096 if (nt_type)
4097 {
4098 iov.iov_base = buf;
4099 iov.iov_len = regset->size;
4100 data = (void *) &iov;
4101 }
4102 else
4103 data = buf;
4104
4105 #ifndef __sparc__
4106 res = ptrace (regset->get_request, pid,
4107 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4108 #else
4109 res = ptrace (regset->get_request, pid, data, nt_type);
4110 #endif
4111
4112 if (res == 0)
4113 {
4114 /* Then overlay our cached registers on that. */
4115 regset->fill_function (regcache, buf);
4116
4117 /* Only now do we write the register set. */
4118 #ifndef __sparc__
4119 res = ptrace (regset->set_request, pid,
4120 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4121 #else
4122 res = ptrace (regset->set_request, pid, data, nt_type);
4123 #endif
4124 }
4125
4126 if (res < 0)
4127 {
4128 if (errno == EIO)
4129 {
4130 /* If we get EIO on a regset, do not try it again for
4131 this process mode. */
4132 disable_regset (regsets_info, regset);
4133 free (buf);
4134 continue;
4135 }
4136 else if (errno == ESRCH)
4137 {
4138 /* At this point, ESRCH should mean the process is
4139 already gone, in which case we simply ignore attempts
4140 to change its registers. See also the related
4141 comment in linux_resume_one_lwp. */
4142 free (buf);
4143 return 0;
4144 }
4145 else
4146 {
4147 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4148 }
4149 }
4150 else if (regset->type == GENERAL_REGS)
4151 saw_general_regs = 1;
4152 regset ++;
4153 free (buf);
4154 }
4155 if (saw_general_regs)
4156 return 0;
4157 else
4158 return 1;
4159 }
4160
4161 #else /* !HAVE_LINUX_REGSETS */
4162
4163 #define use_linux_regsets 0
4164 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4165 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4166
4167 #endif
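
/* Note (added for clarity): the fallback macros above evaluate to 1,
   i.e. "no general regs seen", so when regsets are unavailable the
   callers below fall back to transferring every register individually
   through the PTRACE_PEEKUSER/PTRACE_POKEUSER path. */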
4168
4169 /* Return 1 if register REGNO is supported by one of the regset ptrace
4170 calls or 0 if it has to be transferred individually. */
4171
4172 static int
4173 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4174 {
4175 unsigned char mask = 1 << (regno % 8);
4176 size_t index = regno / 8;
4177
4178 return (use_linux_regsets
4179 && (regs_info->regset_bitmap == NULL
4180 || (regs_info->regset_bitmap[index] & mask) != 0));
4181 }
4182
4183 #ifdef HAVE_LINUX_USRREGS
4184
4185 int
4186 register_addr (const struct usrregs_info *usrregs, int regnum)
4187 {
4188 int addr;
4189
4190 if (regnum < 0 || regnum >= usrregs->num_regs)
4191 error ("Invalid register number %d.", regnum);
4192
4193 addr = usrregs->regmap[regnum];
4194
4195 return addr;
4196 }
4197
4198 /* Fetch one register. */
4199 static void
4200 fetch_register (const struct usrregs_info *usrregs,
4201 struct regcache *regcache, int regno)
4202 {
4203 CORE_ADDR regaddr;
4204 int i, size;
4205 char *buf;
4206 int pid;
4207
4208 if (regno >= usrregs->num_regs)
4209 return;
4210 if ((*the_low_target.cannot_fetch_register) (regno))
4211 return;
4212
4213 regaddr = register_addr (usrregs, regno);
4214 if (regaddr == -1)
4215 return;
4216
4217 size = ((register_size (regcache->tdesc, regno)
4218 + sizeof (PTRACE_XFER_TYPE) - 1)
4219 & -sizeof (PTRACE_XFER_TYPE));
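/* Worked example of the rounding above (illustrative): with an
   8-byte PTRACE_XFER_TYPE, a 10-byte register yields
   (10 + 8 - 1) & -8 == 17 & ~7 == 16, i.e. two transfer words. */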
4220 buf = alloca (size);
4221
4222 pid = lwpid_of (get_thread_lwp (current_inferior));
4223 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4224 {
4225 errno = 0;
4226 *(PTRACE_XFER_TYPE *) (buf + i) =
4227 ptrace (PTRACE_PEEKUSER, pid,
4228 /* Coerce to a uintptr_t first to avoid potential gcc warning
4229 of coercing an 8 byte integer to a 4 byte pointer. */
4230 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4231 regaddr += sizeof (PTRACE_XFER_TYPE);
4232 if (errno != 0)
4233 error ("reading register %d: %s", regno, strerror (errno));
4234 }
4235
4236 if (the_low_target.supply_ptrace_register)
4237 the_low_target.supply_ptrace_register (regcache, regno, buf);
4238 else
4239 supply_register (regcache, regno, buf);
4240 }
4241
4242 /* Store one register. */
4243 static void
4244 store_register (const struct usrregs_info *usrregs,
4245 struct regcache *regcache, int regno)
4246 {
4247 CORE_ADDR regaddr;
4248 int i, size;
4249 char *buf;
4250 int pid;
4251
4252 if (regno >= usrregs->num_regs)
4253 return;
4254 if ((*the_low_target.cannot_store_register) (regno))
4255 return;
4256
4257 regaddr = register_addr (usrregs, regno);
4258 if (regaddr == -1)
4259 return;
4260
4261 size = ((register_size (regcache->tdesc, regno)
4262 + sizeof (PTRACE_XFER_TYPE) - 1)
4263 & -sizeof (PTRACE_XFER_TYPE));
4264 buf = alloca (size);
4265 memset (buf, 0, size);
4266
4267 if (the_low_target.collect_ptrace_register)
4268 the_low_target.collect_ptrace_register (regcache, regno, buf);
4269 else
4270 collect_register (regcache, regno, buf);
4271
4272 pid = lwpid_of (get_thread_lwp (current_inferior));
4273 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4274 {
4275 errno = 0;
4276 ptrace (PTRACE_POKEUSER, pid,
4277 /* Coerce to a uintptr_t first to avoid potential gcc warning
4278 about coercing an 8 byte integer to a 4 byte pointer. */
4279 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4280 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4281 if (errno != 0)
4282 {
4283 /* At this point, ESRCH should mean the process is
4284 already gone, in which case we simply ignore attempts
4285 to change its registers. See also the related
4286 comment in linux_resume_one_lwp. */
4287 if (errno == ESRCH)
4288 return;
4289
4290 if ((*the_low_target.cannot_store_register) (regno) == 0)
4291 error ("writing register %d: %s", regno, strerror (errno));
4292 }
4293 regaddr += sizeof (PTRACE_XFER_TYPE);
4294 }
4295 }
4296
4297 /* Fetch all registers, or just one, from the child process.
4298 If REGNO is -1, do this for all registers, skipping any that are
4299 assumed to have been retrieved by regsets_fetch_inferior_registers,
4300 unless ALL is non-zero.
4301 Otherwise, REGNO specifies which register (so we can save time). */
4302 static void
4303 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4304 struct regcache *regcache, int regno, int all)
4305 {
4306 struct usrregs_info *usr = regs_info->usrregs;
4307
4308 if (regno == -1)
4309 {
4310 for (regno = 0; regno < usr->num_regs; regno++)
4311 if (all || !linux_register_in_regsets (regs_info, regno))
4312 fetch_register (usr, regcache, regno);
4313 }
4314 else
4315 fetch_register (usr, regcache, regno);
4316 }
4317
4318 /* Store our register values back into the inferior.
4319 If REGNO is -1, do this for all registers, skipping any that are
4320 assumed to have been saved by regsets_store_inferior_registers,
4321 unless ALL is non-zero.
4322 Otherwise, REGNO specifies which register (so we can save time). */
4323 static void
4324 usr_store_inferior_registers (const struct regs_info *regs_info,
4325 struct regcache *regcache, int regno, int all)
4326 {
4327 struct usrregs_info *usr = regs_info->usrregs;
4328
4329 if (regno == -1)
4330 {
4331 for (regno = 0; regno < usr->num_regs; regno++)
4332 if (all || !linux_register_in_regsets (regs_info, regno))
4333 store_register (usr, regcache, regno);
4334 }
4335 else
4336 store_register (usr, regcache, regno);
4337 }
4338
4339 #else /* !HAVE_LINUX_USRREGS */
4340
4341 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4342 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4343
4344 #endif
4345
4346
4347 void
4348 linux_fetch_registers (struct regcache *regcache, int regno)
4349 {
4350 int use_regsets;
4351 int all = 0;
4352 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4353
4354 if (regno == -1)
4355 {
4356 if (the_low_target.fetch_register != NULL
4357 && regs_info->usrregs != NULL)
4358 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4359 (*the_low_target.fetch_register) (regcache, regno);
4360
4361 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4362 if (regs_info->usrregs != NULL)
4363 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4364 }
4365 else
4366 {
4367 if (the_low_target.fetch_register != NULL
4368 && (*the_low_target.fetch_register) (regcache, regno))
4369 return;
4370
4371 use_regsets = linux_register_in_regsets (regs_info, regno);
4372 if (use_regsets)
4373 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4374 regcache);
4375 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4376 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4377 }
4378 }
4379
4380 void
4381 linux_store_registers (struct regcache *regcache, int regno)
4382 {
4383 int use_regsets;
4384 int all = 0;
4385 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4386
4387 if (regno == -1)
4388 {
4389 all = regsets_store_inferior_registers (regs_info->regsets_info,
4390 regcache);
4391 if (regs_info->usrregs != NULL)
4392 usr_store_inferior_registers (regs_info, regcache, regno, all);
4393 }
4394 else
4395 {
4396 use_regsets = linux_register_in_regsets (regs_info, regno);
4397 if (use_regsets)
4398 all = regsets_store_inferior_registers (regs_info->regsets_info,
4399 regcache);
4400 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4401 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4402 }
4403 }
4404
4405
/* Copy LEN bytes from inferior's memory starting at MEMADDR
to debugger memory starting at MYADDR. Returns 0 on success,
or the value of errno on failure. */
4408
4409 static int
4410 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4411 {
4412 int pid = lwpid_of (get_thread_lwp (current_inferior));
4413 register PTRACE_XFER_TYPE *buffer;
4414 register CORE_ADDR addr;
4415 register int count;
4416 char filename[64];
4417 register int i;
4418 int ret;
4419 int fd;
4420
4421 /* Try using /proc. Don't bother for one word. */
4422 if (len >= 3 * sizeof (long))
4423 {
4424 int bytes;
4425
4426 /* We could keep this file open and cache it - possibly one per
4427 thread. That requires some juggling, but is even faster. */
4428 sprintf (filename, "/proc/%d/mem", pid);
4429 fd = open (filename, O_RDONLY | O_LARGEFILE);
4430 if (fd == -1)
4431 goto no_proc;
4432
4433 /* If pread64 is available, use it. It's faster if the kernel
4434 supports it (only one syscall), and it's 64-bit safe even on
4435 32-bit platforms (for instance, SPARC debugging a SPARC64
4436 application). */
4437 #ifdef HAVE_PREAD64
4438 bytes = pread64 (fd, myaddr, len, memaddr);
4439 #else
4440 bytes = -1;
4441 if (lseek (fd, memaddr, SEEK_SET) != -1)
4442 bytes = read (fd, myaddr, len);
4443 #endif
4444
4445 close (fd);
4446 if (bytes == len)
4447 return 0;
4448
/* Some data was read; we'll try to get the rest with ptrace. */
4450 if (bytes > 0)
4451 {
4452 memaddr += bytes;
4453 myaddr += bytes;
4454 len -= bytes;
4455 }
4456 }
4457
4458 no_proc:
4459 /* Round starting address down to longword boundary. */
4460 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4461 /* Round ending address up; get number of longwords that makes. */
4462 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4463 / sizeof (PTRACE_XFER_TYPE));
4464 /* Allocate buffer of that many longwords. */
4465 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4466
/* Read all the longwords. */
4468 errno = 0;
4469 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4470 {
4471 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4472 about coercing an 8 byte integer to a 4 byte pointer. */
4473 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4474 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4475 (PTRACE_TYPE_ARG4) 0);
4476 if (errno)
4477 break;
4478 }
4479 ret = errno;
4480
4481 /* Copy appropriate bytes out of the buffer. */
4482 if (i > 0)
4483 {
4484 i *= sizeof (PTRACE_XFER_TYPE);
4485 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4486 memcpy (myaddr,
4487 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4488 i < len ? i : len);
4489 }
4490
4491 return ret;
4492 }
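
/* A worked example of the alignment logic above (illustrative):
   reading 8 bytes at memaddr 0x1003 with an 8-byte PTRACE_XFER_TYPE
   rounds addr down to 0x1000, computes count == 2, peeks the words
   at 0x1000 and 0x1008, and copies out of the buffer starting at
   offset 0x1003 & 7 == 3. */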
4493
4494 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4495 memory at MEMADDR. On failure (cannot write to the inferior)
4496 returns the value of errno. Always succeeds if LEN is zero. */
4497
4498 static int
4499 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4500 {
4501 register int i;
4502 /* Round starting address down to longword boundary. */
4503 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4504 /* Round ending address up; get number of longwords that makes. */
4505 register int count
4506 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4507 / sizeof (PTRACE_XFER_TYPE);
4508
4509 /* Allocate buffer of that many longwords. */
4510 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4511 alloca (count * sizeof (PTRACE_XFER_TYPE));
4512
4513 int pid = lwpid_of (get_thread_lwp (current_inferior));
4514
4515 if (len == 0)
4516 {
4517 /* Zero length write always succeeds. */
4518 return 0;
4519 }
4520
4521 if (debug_threads)
4522 {
4523 /* Dump up to four bytes. */
4524 unsigned int val = * (unsigned int *) myaddr;
4525 if (len == 1)
4526 val = val & 0xff;
4527 else if (len == 2)
4528 val = val & 0xffff;
4529 else if (len == 3)
4530 val = val & 0xffffff;
4531 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4532 val, (long)memaddr);
4533 }
4534
4535 /* Fill start and end extra bytes of buffer with existing memory data. */
4536
4537 errno = 0;
4538 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4539 about coercing an 8 byte integer to a 4 byte pointer. */
4540 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4541 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4542 (PTRACE_TYPE_ARG4) 0);
4543 if (errno)
4544 return errno;
4545
4546 if (count > 1)
4547 {
4548 errno = 0;
4549 buffer[count - 1]
4550 = ptrace (PTRACE_PEEKTEXT, pid,
4551 /* Coerce to a uintptr_t first to avoid potential gcc warning
4552 about coercing an 8 byte integer to a 4 byte pointer. */
4553 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4554 * sizeof (PTRACE_XFER_TYPE)),
4555 (PTRACE_TYPE_ARG4) 0);
4556 if (errno)
4557 return errno;
4558 }
4559
4560 /* Copy data to be written over corresponding part of buffer. */
4561
4562 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4563 myaddr, len);
4564
4565 /* Write the entire buffer. */
4566
4567 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4568 {
4569 errno = 0;
4570 ptrace (PTRACE_POKETEXT, pid,
4571 /* Coerce to a uintptr_t first to avoid potential gcc warning
4572 about coercing an 8 byte integer to a 4 byte pointer. */
4573 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4574 (PTRACE_TYPE_ARG4) buffer[i]);
4575 if (errno)
4576 return errno;
4577 }
4578
4579 return 0;
4580 }
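
/* Example of the read-modify-write above (illustrative): poking a
   single byte at 0x1005 peeks the word at 0x1000, overwrites byte 5
   of the buffer, and pokes the whole word back; only when the write
   spans several words is the last word peeked as well. */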
4581
4582 static void
4583 linux_look_up_symbols (void)
4584 {
4585 #ifdef USE_THREAD_DB
4586 struct process_info *proc = current_process ();
4587
4588 if (proc->private->thread_db != NULL)
4589 return;
4590
4591 /* If the kernel supports tracing clones, then we don't need to
4592 use the magic thread event breakpoint to learn about
4593 threads. */
4594 thread_db_init (!linux_supports_traceclone ());
4595 #endif
4596 }
4597
4598 static void
4599 linux_request_interrupt (void)
4600 {
4601 extern unsigned long signal_pid;
4602
4603 if (!ptid_equal (cont_thread, null_ptid)
4604 && !ptid_equal (cont_thread, minus_one_ptid))
4605 {
4606 struct lwp_info *lwp;
4607 int lwpid;
4608
4609 lwp = get_thread_lwp (current_inferior);
4610 lwpid = lwpid_of (lwp);
4611 kill_lwp (lwpid, SIGINT);
4612 }
4613 else
4614 kill_lwp (signal_pid, SIGINT);
4615 }
4616
4617 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4618 to debugger memory starting at MYADDR. */
4619
4620 static int
4621 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4622 {
4623 char filename[PATH_MAX];
4624 int fd, n;
4625 int pid = lwpid_of (get_thread_lwp (current_inferior));
4626
4627 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4628
4629 fd = open (filename, O_RDONLY);
4630 if (fd < 0)
4631 return -1;
4632
4633 if (offset != (CORE_ADDR) 0
4634 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4635 n = -1;
4636 else
4637 n = read (fd, myaddr, len);
4638
4639 close (fd);
4640
4641 return n;
4642 }
4643
4644 /* These breakpoint and watchpoint related wrapper functions simply
4645 pass on the function call if the target has registered a
4646 corresponding function. */
4647
4648 static int
4649 linux_insert_point (char type, CORE_ADDR addr, int len)
4650 {
4651 if (the_low_target.insert_point != NULL)
4652 return the_low_target.insert_point (type, addr, len);
4653 else
4654 /* Unsupported (see target.h). */
4655 return 1;
4656 }
4657
4658 static int
4659 linux_remove_point (char type, CORE_ADDR addr, int len)
4660 {
4661 if (the_low_target.remove_point != NULL)
4662 return the_low_target.remove_point (type, addr, len);
4663 else
4664 /* Unsupported (see target.h). */
4665 return 1;
4666 }
4667
4668 static int
4669 linux_stopped_by_watchpoint (void)
4670 {
4671 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4672
4673 return lwp->stopped_by_watchpoint;
4674 }
4675
4676 static CORE_ADDR
4677 linux_stopped_data_address (void)
4678 {
4679 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4680
4681 return lwp->stopped_data_address;
4682 }
4683
4684 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4685 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4686 && defined(PT_TEXT_END_ADDR)
4687
4688 /* This is only used for targets that define PT_TEXT_ADDR,
PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, presumably
4690 the target has different ways of acquiring this information, like
4691 loadmaps. */
4692
4693 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4694 to tell gdb about. */
4695
4696 static int
4697 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4698 {
4699 unsigned long text, text_end, data;
4700 int pid = lwpid_of (get_thread_lwp (current_inferior));
4701
4702 errno = 0;
4703
4704 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4705 (PTRACE_TYPE_ARG4) 0);
4706 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4707 (PTRACE_TYPE_ARG4) 0);
4708 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4709 (PTRACE_TYPE_ARG4) 0);
4710
4711 if (errno == 0)
4712 {
4713 /* Both text and data offsets produced at compile-time (and so
4714 used by gdb) are relative to the beginning of the program,
4715 with the data segment immediately following the text segment.
4716 However, the actual runtime layout in memory may put the data
4717 somewhere else, so when we send gdb a data base-address, we
4718 use the real data base address and subtract the compile-time
4719 data base-address from it (which is just the length of the
4720 text segment). BSS immediately follows data in both
4721 cases. */
4722 *text_p = text;
4723 *data_p = data - (text_end - text);
4724
4725 return 1;
4726 }
4727 return 0;
4728 }
4729 #endif
4730
4731 static int
4732 linux_qxfer_osdata (const char *annex,
4733 unsigned char *readbuf, unsigned const char *writebuf,
4734 CORE_ADDR offset, int len)
4735 {
4736 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4737 }
4738
/* Convert a native/host siginfo object into/from the siginfo in the
layout of the inferior's architecture. */
4741
4742 static void
4743 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4744 {
4745 int done = 0;
4746
4747 if (the_low_target.siginfo_fixup != NULL)
4748 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4749
4750 /* If there was no callback, or the callback didn't do anything,
4751 then just do a straight memcpy. */
4752 if (!done)
4753 {
4754 if (direction == 1)
4755 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4756 else
4757 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4758 }
4759 }
4760
4761 static int
4762 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4763 unsigned const char *writebuf, CORE_ADDR offset, int len)
4764 {
4765 int pid;
4766 siginfo_t siginfo;
4767 char inf_siginfo[sizeof (siginfo_t)];
4768
4769 if (current_inferior == NULL)
4770 return -1;
4771
4772 pid = lwpid_of (get_thread_lwp (current_inferior));
4773
4774 if (debug_threads)
4775 fprintf (stderr, "%s siginfo for lwp %d.\n",
4776 readbuf != NULL ? "Reading" : "Writing",
4777 pid);
4778
4779 if (offset >= sizeof (siginfo))
4780 return -1;
4781
4782 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4783 return -1;
4784
4785 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4786 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4787 inferior with a 64-bit GDBSERVER should look the same as debugging it
4788 with a 32-bit GDBSERVER, we need to convert it. */
4789 siginfo_fixup (&siginfo, inf_siginfo, 0);
4790
4791 if (offset + len > sizeof (siginfo))
4792 len = sizeof (siginfo) - offset;
4793
4794 if (readbuf != NULL)
4795 memcpy (readbuf, inf_siginfo + offset, len);
4796 else
4797 {
4798 memcpy (inf_siginfo + offset, writebuf, len);
4799
4800 /* Convert back to ptrace layout before flushing it out. */
4801 siginfo_fixup (&siginfo, inf_siginfo, 1);
4802
4803 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4804 return -1;
4805 }
4806
4807 return len;
4808 }
4809
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
it notifies us that a child may have changed state; and it is the
handler for the sigsuspend in my_waitpid. */
4813
4814 static void
4815 sigchld_handler (int signo)
4816 {
4817 int old_errno = errno;
4818
4819 if (debug_threads)
4820 {
4821 do
4822 {
4823 /* fprintf is not async-signal-safe, so call write
4824 directly. */
4825 if (write (2, "sigchld_handler\n",
4826 sizeof ("sigchld_handler\n") - 1) < 0)
4827 break; /* just ignore */
4828 } while (0);
4829 }
4830
4831 if (target_is_async_p ())
4832 async_file_mark (); /* trigger a linux_wait */
4833
4834 errno = old_errno;
4835 }
4836
4837 static int
4838 linux_supports_non_stop (void)
4839 {
4840 return 1;
4841 }
4842
4843 static int
4844 linux_async (int enable)
4845 {
4846 int previous = (linux_event_pipe[0] != -1);
4847
4848 if (debug_threads)
4849 fprintf (stderr, "linux_async (%d), previous=%d\n",
4850 enable, previous);
4851
4852 if (previous != enable)
4853 {
4854 sigset_t mask;
4855 sigemptyset (&mask);
4856 sigaddset (&mask, SIGCHLD);
4857
4858 sigprocmask (SIG_BLOCK, &mask, NULL);
4859
4860 if (enable)
4861 {
4862 if (pipe (linux_event_pipe) == -1)
4863 fatal ("creating event pipe failed.");
4864
4865 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4866 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4867
4868 /* Register the event loop handler. */
4869 add_file_handler (linux_event_pipe[0],
4870 handle_target_event, NULL);
4871
4872 /* Always trigger a linux_wait. */
4873 async_file_mark ();
4874 }
4875 else
4876 {
4877 delete_file_handler (linux_event_pipe[0]);
4878
4879 close (linux_event_pipe[0]);
4880 close (linux_event_pipe[1]);
4881 linux_event_pipe[0] = -1;
4882 linux_event_pipe[1] = -1;
4883 }
4884
4885 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4886 }
4887
4888 return previous;
4889 }
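
/* The pipe managed above is the usual self-pipe trick, sketched here
   for clarity: the SIGCHLD handler calls async_file_mark, which
   writes a byte to linux_event_pipe[1]; the event loop watches
   linux_event_pipe[0] via add_file_handler and eventually wakes up
   to call linux_wait. Both ends are O_NONBLOCK so a full pipe can
   never block the signal handler. */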
4890
4891 static int
4892 linux_start_non_stop (int nonstop)
4893 {
4894 /* Register or unregister from event-loop accordingly. */
4895 linux_async (nonstop);
4896 return 0;
4897 }
4898
4899 static int
4900 linux_supports_multi_process (void)
4901 {
4902 return 1;
4903 }
4904
4905 static int
4906 linux_supports_disable_randomization (void)
4907 {
4908 #ifdef HAVE_PERSONALITY
4909 return 1;
4910 #else
4911 return 0;
4912 #endif
4913 }
4914
4915 static int
4916 linux_supports_agent (void)
4917 {
4918 return 1;
4919 }
4920
4921 static int
4922 linux_supports_range_stepping (void)
4923 {
4924 if (*the_low_target.supports_range_stepping == NULL)
4925 return 0;
4926
4927 return (*the_low_target.supports_range_stepping) ();
4928 }
4929
4930 /* Enumerate spufs IDs for process PID. */
4931 static int
4932 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4933 {
4934 int pos = 0;
4935 int written = 0;
4936 char path[128];
4937 DIR *dir;
4938 struct dirent *entry;
4939
4940 sprintf (path, "/proc/%ld/fd", pid);
4941 dir = opendir (path);
4942 if (!dir)
4943 return -1;
4944
4945 rewinddir (dir);
4946 while ((entry = readdir (dir)) != NULL)
4947 {
4948 struct stat st;
4949 struct statfs stfs;
4950 int fd;
4951
4952 fd = atoi (entry->d_name);
4953 if (!fd)
4954 continue;
4955
4956 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4957 if (stat (path, &st) != 0)
4958 continue;
4959 if (!S_ISDIR (st.st_mode))
4960 continue;
4961
4962 if (statfs (path, &stfs) != 0)
4963 continue;
4964 if (stfs.f_type != SPUFS_MAGIC)
4965 continue;
4966
4967 if (pos >= offset && pos + 4 <= offset + len)
4968 {
4969 *(unsigned int *)(buf + pos - offset) = fd;
4970 written += 4;
4971 }
4972 pos += 4;
4973 }
4974
4975 closedir (dir);
4976 return written;
4977 }
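
/* Windowing example for the loop above (illustrative): each spufs
   context contributes 4 bytes at position POS; with offset == 4 and
   len == 8, only the second and third IDs found fall entirely inside
   [offset, offset + len) and are copied to BUF. */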
4978
4979 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4980 object type, using the /proc file system. */
4981 static int
4982 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4983 unsigned const char *writebuf,
4984 CORE_ADDR offset, int len)
4985 {
4986 long pid = lwpid_of (get_thread_lwp (current_inferior));
4987 char buf[128];
4988 int fd = 0;
4989 int ret = 0;
4990
4991 if (!writebuf && !readbuf)
4992 return -1;
4993
4994 if (!*annex)
4995 {
4996 if (!readbuf)
4997 return -1;
4998 else
4999 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5000 }
5001
5002 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5003 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5004 if (fd <= 0)
5005 return -1;
5006
5007 if (offset != 0
5008 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5009 {
5010 close (fd);
5011 return 0;
5012 }
5013
5014 if (writebuf)
5015 ret = write (fd, writebuf, (size_t) len);
5016 else
5017 ret = read (fd, readbuf, (size_t) len);
5018
5019 close (fd);
5020 return ret;
5021 }
5022
5023 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5024 struct target_loadseg
5025 {
5026 /* Core address to which the segment is mapped. */
5027 Elf32_Addr addr;
5028 /* VMA recorded in the program header. */
5029 Elf32_Addr p_vaddr;
5030 /* Size of this segment in memory. */
5031 Elf32_Word p_memsz;
5032 };
5033
5034 # if defined PT_GETDSBT
5035 struct target_loadmap
5036 {
5037 /* Protocol version number, must be zero. */
5038 Elf32_Word version;
5039 /* Pointer to the DSBT table, its size, and the DSBT index. */
5040 unsigned *dsbt_table;
5041 unsigned dsbt_size, dsbt_index;
5042 /* Number of segments in this map. */
5043 Elf32_Word nsegs;
5044 /* The actual memory map. */
5045 struct target_loadseg segs[/*nsegs*/];
5046 };
5047 # define LINUX_LOADMAP PT_GETDSBT
5048 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5049 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5050 # else
5051 struct target_loadmap
5052 {
5053 /* Protocol version number, must be zero. */
5054 Elf32_Half version;
5055 /* Number of segments in this map. */
5056 Elf32_Half nsegs;
5057 /* The actual memory map. */
5058 struct target_loadseg segs[/*nsegs*/];
5059 };
5060 # define LINUX_LOADMAP PTRACE_GETFDPIC
5061 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5062 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5063 # endif
5064
5065 static int
5066 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5067 unsigned char *myaddr, unsigned int len)
5068 {
5069 int pid = lwpid_of (get_thread_lwp (current_inferior));
5070 int addr = -1;
5071 struct target_loadmap *data = NULL;
5072 unsigned int actual_length, copy_length;
5073
5074 if (strcmp (annex, "exec") == 0)
5075 addr = (int) LINUX_LOADMAP_EXEC;
5076 else if (strcmp (annex, "interp") == 0)
5077 addr = (int) LINUX_LOADMAP_INTERP;
5078 else
5079 return -1;
5080
5081 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5082 return -1;
5083
5084 if (data == NULL)
5085 return -1;
5086
5087 actual_length = sizeof (struct target_loadmap)
5088 + sizeof (struct target_loadseg) * data->nsegs;
5089
5090 if (offset < 0 || offset > actual_length)
5091 return -1;
5092
5093 copy_length = actual_length - offset < len ? actual_length - offset : len;
5094 memcpy (myaddr, (char *) data + offset, copy_length);
5095 return copy_length;
5096 }
5097 #else
5098 # define linux_read_loadmap NULL
5099 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5100
5101 static void
5102 linux_process_qsupported (const char *query)
5103 {
5104 if (the_low_target.process_qsupported != NULL)
5105 the_low_target.process_qsupported (query);
5106 }
5107
5108 static int
5109 linux_supports_tracepoints (void)
5110 {
5111 if (*the_low_target.supports_tracepoints == NULL)
5112 return 0;
5113
5114 return (*the_low_target.supports_tracepoints) ();
5115 }
5116
5117 static CORE_ADDR
5118 linux_read_pc (struct regcache *regcache)
5119 {
5120 if (the_low_target.get_pc == NULL)
5121 return 0;
5122
5123 return (*the_low_target.get_pc) (regcache);
5124 }
5125
5126 static void
5127 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5128 {
5129 gdb_assert (the_low_target.set_pc != NULL);
5130
5131 (*the_low_target.set_pc) (regcache, pc);
5132 }
5133
5134 static int
5135 linux_thread_stopped (struct thread_info *thread)
5136 {
5137 return get_thread_lwp (thread)->stopped;
5138 }
5139
5140 /* This exposes stop-all-threads functionality to other modules. */
5141
5142 static void
5143 linux_pause_all (int freeze)
5144 {
5145 stop_all_lwps (freeze, NULL);
5146 }
5147
5148 /* This exposes unstop-all-threads functionality to other gdbserver
5149 modules. */
5150
5151 static void
5152 linux_unpause_all (int unfreeze)
5153 {
5154 unstop_all_lwps (unfreeze, NULL);
5155 }
5156
5157 static int
5158 linux_prepare_to_access_memory (void)
5159 {
5160 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5161 running LWP. */
5162 if (non_stop)
5163 linux_pause_all (1);
5164 return 0;
5165 }
5166
5167 static void
5168 linux_done_accessing_memory (void)
5169 {
5170 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5171 running LWP. */
5172 if (non_stop)
5173 linux_unpause_all (1);
5174 }
5175
5176 static int
5177 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5178 CORE_ADDR collector,
5179 CORE_ADDR lockaddr,
5180 ULONGEST orig_size,
5181 CORE_ADDR *jump_entry,
5182 CORE_ADDR *trampoline,
5183 ULONGEST *trampoline_size,
5184 unsigned char *jjump_pad_insn,
5185 ULONGEST *jjump_pad_insn_size,
5186 CORE_ADDR *adjusted_insn_addr,
5187 CORE_ADDR *adjusted_insn_addr_end,
5188 char *err)
5189 {
5190 return (*the_low_target.install_fast_tracepoint_jump_pad)
5191 (tpoint, tpaddr, collector, lockaddr, orig_size,
5192 jump_entry, trampoline, trampoline_size,
5193 jjump_pad_insn, jjump_pad_insn_size,
5194 adjusted_insn_addr, adjusted_insn_addr_end,
5195 err);
5196 }
5197
5198 static struct emit_ops *
5199 linux_emit_ops (void)
5200 {
5201 if (the_low_target.emit_ops != NULL)
5202 return (*the_low_target.emit_ops) ();
5203 else
5204 return NULL;
5205 }
5206
5207 static int
5208 linux_get_min_fast_tracepoint_insn_len (void)
5209 {
5210 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5211 }
5212
/* Extract the AT_PHDR and AT_PHNUM auxv values from the inferior
into *PHDR_MEMADDR and *NUM_PHDR. Return 0 on success. */
5214
5215 static int
5216 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5217 CORE_ADDR *phdr_memaddr, int *num_phdr)
5218 {
5219 char filename[PATH_MAX];
5220 int fd;
5221 const int auxv_size = is_elf64
5222 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5223 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5224
5225 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5226
5227 fd = open (filename, O_RDONLY);
5228 if (fd < 0)
5229 return 1;
5230
5231 *phdr_memaddr = 0;
5232 *num_phdr = 0;
5233 while (read (fd, buf, auxv_size) == auxv_size
5234 && (*phdr_memaddr == 0 || *num_phdr == 0))
5235 {
5236 if (is_elf64)
5237 {
5238 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5239
5240 switch (aux->a_type)
5241 {
5242 case AT_PHDR:
5243 *phdr_memaddr = aux->a_un.a_val;
5244 break;
5245 case AT_PHNUM:
5246 *num_phdr = aux->a_un.a_val;
5247 break;
5248 }
5249 }
5250 else
5251 {
5252 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5253
5254 switch (aux->a_type)
5255 {
5256 case AT_PHDR:
5257 *phdr_memaddr = aux->a_un.a_val;
5258 break;
5259 case AT_PHNUM:
5260 *num_phdr = aux->a_un.a_val;
5261 break;
5262 }
5263 }
5264 }
5265
5266 close (fd);
5267
5268 if (*phdr_memaddr == 0 || *num_phdr == 0)
5269 {
5270 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5271 "phdr_memaddr = %ld, phdr_num = %d",
5272 (long) *phdr_memaddr, *num_phdr);
5273 return 2;
5274 }
5275
5276 return 0;
5277 }
5278
5279 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5280
5281 static CORE_ADDR
5282 get_dynamic (const int pid, const int is_elf64)
5283 {
5284 CORE_ADDR phdr_memaddr, relocation;
5285 int num_phdr, i;
5286 unsigned char *phdr_buf;
5287 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5288
5289 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5290 return 0;
5291
5292 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5293 phdr_buf = alloca (num_phdr * phdr_size);
5294
5295 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5296 return 0;
5297
5298 /* Compute relocation: it is expected to be 0 for "regular" executables,
5299 non-zero for PIE ones. */
5300 relocation = -1;
5301 for (i = 0; relocation == -1 && i < num_phdr; i++)
5302 if (is_elf64)
5303 {
5304 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5305
5306 if (p->p_type == PT_PHDR)
5307 relocation = phdr_memaddr - p->p_vaddr;
5308 }
5309 else
5310 {
5311 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5312
5313 if (p->p_type == PT_PHDR)
5314 relocation = phdr_memaddr - p->p_vaddr;
5315 }
5316
5317 if (relocation == -1)
5318 {
/* PT_PHDR is optional, but necessary for PIE in general.
Fortunately, real world executables, including PIE executables,
always have PT_PHDR present. PT_PHDR is absent from some shared
libraries and from fpc (Free Pascal 2.4) binaries, but neither of
those needs or provides DT_DEBUG anyway (fpc binaries are
statically linked).

Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.

GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
5328
5329 return 0;
5330 }
5331
5332 for (i = 0; i < num_phdr; i++)
5333 {
5334 if (is_elf64)
5335 {
5336 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5337
5338 if (p->p_type == PT_DYNAMIC)
5339 return p->p_vaddr + relocation;
5340 }
5341 else
5342 {
5343 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5344
5345 if (p->p_type == PT_DYNAMIC)
5346 return p->p_vaddr + relocation;
5347 }
5348 }
5349
5350 return 0;
5351 }
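
/* Relocation example (illustrative): for a PIE whose PT_PHDR records
   p_vaddr == 0x40 but whose program headers were actually mapped at
   0x555555554040 (the AT_PHDR value), the loop above computes
   relocation == 0x555555554000, and PT_DYNAMIC's p_vaddr is rebased
   by that amount before being returned. */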
5352
5353 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5354 can be 0 if the inferior does not yet have the library list initialized.
5355 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5356 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5357
5358 static CORE_ADDR
5359 get_r_debug (const int pid, const int is_elf64)
5360 {
5361 CORE_ADDR dynamic_memaddr;
5362 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5363 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5364 CORE_ADDR map = -1;
5365
5366 dynamic_memaddr = get_dynamic (pid, is_elf64);
5367 if (dynamic_memaddr == 0)
5368 return map;
5369
5370 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5371 {
5372 if (is_elf64)
5373 {
5374 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5375 #ifdef DT_MIPS_RLD_MAP
5376 union
5377 {
5378 Elf64_Xword map;
5379 unsigned char buf[sizeof (Elf64_Xword)];
5380 }
5381 rld_map;
5382
5383 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5384 {
5385 if (linux_read_memory (dyn->d_un.d_val,
5386 rld_map.buf, sizeof (rld_map.buf)) == 0)
5387 return rld_map.map;
5388 else
5389 break;
5390 }
5391 #endif /* DT_MIPS_RLD_MAP */
5392
5393 if (dyn->d_tag == DT_DEBUG && map == -1)
5394 map = dyn->d_un.d_val;
5395
5396 if (dyn->d_tag == DT_NULL)
5397 break;
5398 }
5399 else
5400 {
5401 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5402 #ifdef DT_MIPS_RLD_MAP
5403 union
5404 {
5405 Elf32_Word map;
5406 unsigned char buf[sizeof (Elf32_Word)];
5407 }
5408 rld_map;
5409
5410 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5411 {
5412 if (linux_read_memory (dyn->d_un.d_val,
5413 rld_map.buf, sizeof (rld_map.buf)) == 0)
5414 return rld_map.map;
5415 else
5416 break;
5417 }
5418 #endif /* DT_MIPS_RLD_MAP */
5419
5420 if (dyn->d_tag == DT_DEBUG && map == -1)
5421 map = dyn->d_un.d_val;
5422
5423 if (dyn->d_tag == DT_NULL)
5424 break;
5425 }
5426
5427 dynamic_memaddr += dyn_size;
5428 }
5429
5430 return map;
5431 }
5432
5433 /* Read one pointer from MEMADDR in the inferior. */
5434
5435 static int
5436 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5437 {
5438 int ret;
5439
5440 /* Go through a union so this works on either big or little endian
5441 hosts, when the inferior's pointer size is smaller than the size
5442 of CORE_ADDR. It is assumed the inferior's endianness is the
same as the superior's. */
5444 union
5445 {
5446 CORE_ADDR core_addr;
5447 unsigned int ui;
5448 unsigned char uc;
5449 } addr;
5450
5451 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5452 if (ret == 0)
5453 {
5454 if (ptr_size == sizeof (CORE_ADDR))
5455 *ptr = addr.core_addr;
5456 else if (ptr_size == sizeof (unsigned int))
5457 *ptr = addr.ui;
5458 else
5459 gdb_assert_not_reached ("unhandled pointer size");
5460 }
5461 return ret;
5462 }
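
/* Concrete case (illustrative): on a big-endian host with an 8-byte
   CORE_ADDR, reading a 4-byte inferior pointer fills only the first
   four bytes of the union, so addr.ui holds the correct value while
   addr.core_addr would include four bytes of garbage; hence the
   union member is selected by PTR_SIZE above. */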
5463
5464 struct link_map_offsets
5465 {
/* Offset of r_debug.r_version. */
5467 int r_version_offset;
5468
/* Offset of r_debug.r_map. */
5470 int r_map_offset;
5471
5472 /* Offset to l_addr field in struct link_map. */
5473 int l_addr_offset;
5474
5475 /* Offset to l_name field in struct link_map. */
5476 int l_name_offset;
5477
5478 /* Offset to l_ld field in struct link_map. */
5479 int l_ld_offset;
5480
5481 /* Offset to l_next field in struct link_map. */
5482 int l_next_offset;
5483
5484 /* Offset to l_prev field in struct link_map. */
5485 int l_prev_offset;
5486 };
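
/* For reference, a minimal sketch of the SVR4/glibc structures these
   offsets index into (field names as in <link.h>; this sketch assumes
   the common layout and is not part of the original sources):

     struct r_debug { int r_version; struct link_map *r_map; ... };
     struct link_map { ElfW(Addr) l_addr; char *l_name;
                       ElfW(Dyn) *l_ld;
                       struct link_map *l_next, *l_prev; ... };  */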
5487
5488 /* Construct qXfer:libraries-svr4:read reply. */
5489
5490 static int
5491 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5492 unsigned const char *writebuf,
5493 CORE_ADDR offset, int len)
5494 {
5495 char *document;
5496 unsigned document_len;
5497 struct process_info_private *const priv = current_process ()->private;
5498 char filename[PATH_MAX];
5499 int pid, is_elf64;
5500
5501 static const struct link_map_offsets lmo_32bit_offsets =
5502 {
5503 0, /* r_version offset. */
5504 4, /* r_debug.r_map offset. */
5505 0, /* l_addr offset in link_map. */
5506 4, /* l_name offset in link_map. */
5507 8, /* l_ld offset in link_map. */
5508 12, /* l_next offset in link_map. */
5509 16 /* l_prev offset in link_map. */
5510 };
5511
5512 static const struct link_map_offsets lmo_64bit_offsets =
5513 {
5514 0, /* r_version offset. */
5515 8, /* r_debug.r_map offset. */
5516 0, /* l_addr offset in link_map. */
5517 8, /* l_name offset in link_map. */
5518 16, /* l_ld offset in link_map. */
5519 24, /* l_next offset in link_map. */
5520 32 /* l_prev offset in link_map. */
5521 };
5522 const struct link_map_offsets *lmo;
5523 unsigned int machine;
5524 int ptr_size;
5525 CORE_ADDR lm_addr = 0, lm_prev = 0;
5526 int allocated = 1024;
5527 char *p;
5528 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5529 int header_done = 0;
5530
5531 if (writebuf != NULL)
5532 return -2;
5533 if (readbuf == NULL)
5534 return -1;
5535
5536 pid = lwpid_of (get_thread_lwp (current_inferior));
5537 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5538 is_elf64 = elf_64_file_p (filename, &machine);
5539 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5540 ptr_size = is_elf64 ? 8 : 4;
5541
5542 while (annex[0] != '\0')
5543 {
5544 const char *sep;
5545 CORE_ADDR *addrp;
5546 int len;
5547
5548 sep = strchr (annex, '=');
5549 if (sep == NULL)
5550 break;
5551
5552 len = sep - annex;
5553 if (len == 5 && strncmp (annex, "start", 5) == 0)
5554 addrp = &lm_addr;
5555 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5556 addrp = &lm_prev;
5557 else
5558 {
5559 annex = strchr (sep, ';');
5560 if (annex == NULL)
5561 break;
5562 annex++;
5563 continue;
5564 }
5565
5566 annex = decode_address_to_semicolon (addrp, sep + 1);
5567 }
5568
5569 if (lm_addr == 0)
5570 {
5571 int r_version = 0;
5572
5573 if (priv->r_debug == 0)
5574 priv->r_debug = get_r_debug (pid, is_elf64);
5575
/* We failed to find DT_DEBUG. That situation will not change
for this inferior - do not retry it. Report it to GDB as E01;
see GDB's solib-svr4.c for the reasons. */
5579 if (priv->r_debug == (CORE_ADDR) -1)
5580 return -1;
5581
5582 if (priv->r_debug != 0)
5583 {
5584 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5585 (unsigned char *) &r_version,
5586 sizeof (r_version)) != 0
5587 || r_version != 1)
5588 {
5589 warning ("unexpected r_debug version %d", r_version);
5590 }
5591 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5592 &lm_addr, ptr_size) != 0)
5593 {
5594 warning ("unable to read r_map from 0x%lx",
5595 (long) priv->r_debug + lmo->r_map_offset);
5596 }
5597 }
5598 }
5599
5600 document = xmalloc (allocated);
5601 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5602 p = document + strlen (document);
5603
5604 while (lm_addr
5605 && read_one_ptr (lm_addr + lmo->l_name_offset,
5606 &l_name, ptr_size) == 0
5607 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5608 &l_addr, ptr_size) == 0
5609 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5610 &l_ld, ptr_size) == 0
5611 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5612 &l_prev, ptr_size) == 0
5613 && read_one_ptr (lm_addr + lmo->l_next_offset,
5614 &l_next, ptr_size) == 0)
5615 {
5616 unsigned char libname[PATH_MAX];
5617
5618 if (lm_prev != l_prev)
5619 {
5620 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5621 (long) lm_prev, (long) l_prev);
5622 break;
5623 }
5624
/* Ignore the first entry even if it has a valid name, as it
corresponds to the main executable. The first entry should not be
skipped if the dynamic loader was loaded late by a static executable
(see the solib-svr4.c parameter ignore_first). But in such a case
the main executable does not have PT_DYNAMIC present, and this
function has already exited above due to a failed get_r_debug. */
5631 if (lm_prev == 0)
5632 {
5633 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5634 p = p + strlen (p);
5635 }
5636 else
5637 {
5638 /* Not checking for error because reading may stop before
5639 we've got PATH_MAX worth of characters. */
5640 libname[0] = '\0';
5641 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5642 libname[sizeof (libname) - 1] = '\0';
5643 if (libname[0] != '\0')
5644 {
5645 /* 6x the size for xml_escape_text below. */
5646 size_t len = 6 * strlen ((char *) libname);
5647 char *name;
5648
5649 if (!header_done)
5650 {
5651 /* Terminate `<library-list-svr4'. */
5652 *p++ = '>';
5653 header_done = 1;
5654 }
5655
5656 while (allocated < p - document + len + 200)
5657 {
5658 /* Expand to guarantee sufficient storage. */
5659 uintptr_t document_len = p - document;
5660
5661 document = xrealloc (document, 2 * allocated);
5662 allocated *= 2;
5663 p = document + document_len;
5664 }
5665
5666 name = xml_escape_text ((char *) libname);
5667 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5668 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5669 name, (unsigned long) lm_addr,
5670 (unsigned long) l_addr, (unsigned long) l_ld);
5671 free (name);
5672 }
5673 }
5674
5675 lm_prev = lm_addr;
5676 lm_addr = l_next;
5677 }
5678
5679 if (!header_done)
5680 {
5681 /* Empty list; terminate `<library-list-svr4'. */
5682 strcpy (p, "/>");
5683 }
5684 else
5685 strcpy (p, "</library-list-svr4>");
5686
5687 document_len = strlen (document);
5688 if (offset < document_len)
5689 document_len -= offset;
5690 else
5691 document_len = 0;
5692 if (len > document_len)
5693 len = document_len;
5694
5695 memcpy (readbuf, document + offset, len);
5696 xfree (document);
5697
5698 return len;
5699 }
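
/* An illustrative reply produced by the code above (addresses made
   up; the real document is emitted without line breaks):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
                l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcb9c0"/>
     </library-list-svr4>  */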
5700
5701 #ifdef HAVE_LINUX_BTRACE
5702
5703 /* Enable branch tracing. */
5704
5705 static struct btrace_target_info *
5706 linux_low_enable_btrace (ptid_t ptid)
5707 {
5708 struct btrace_target_info *tinfo;
5709
5710 tinfo = linux_enable_btrace (ptid);
5711
5712 if (tinfo != NULL)
5713 {
5714 struct thread_info *thread = find_thread_ptid (ptid);
5715 struct regcache *regcache = get_thread_regcache (thread, 0);
5716
5717 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5718 }
5719
5720 return tinfo;
5721 }
5722
5723 /* Read branch trace data as btrace xml document. */
5724
5725 static void
5726 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5727 int type)
5728 {
5729 VEC (btrace_block_s) *btrace;
5730 struct btrace_block *block;
5731 int i;
5732
5733 btrace = linux_read_btrace (tinfo, type);
5734
5735 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5736 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5737
5738 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
5739 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5740 paddress (block->begin), paddress (block->end));
5741
5742 buffer_grow_str (buffer, "</btrace>\n");
5743
5744 VEC_free (btrace_block_s, btrace);
5745 }
5746 #endif /* HAVE_LINUX_BTRACE */
5747
5748 static struct target_ops linux_target_ops = {
5749 linux_create_inferior,
5750 linux_attach,
5751 linux_kill,
5752 linux_detach,
5753 linux_mourn,
5754 linux_join,
5755 linux_thread_alive,
5756 linux_resume,
5757 linux_wait,
5758 linux_fetch_registers,
5759 linux_store_registers,
5760 linux_prepare_to_access_memory,
5761 linux_done_accessing_memory,
5762 linux_read_memory,
5763 linux_write_memory,
5764 linux_look_up_symbols,
5765 linux_request_interrupt,
5766 linux_read_auxv,
5767 linux_insert_point,
5768 linux_remove_point,
5769 linux_stopped_by_watchpoint,
5770 linux_stopped_data_address,
5771 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5772 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5773 && defined(PT_TEXT_END_ADDR)
5774 linux_read_offsets,
5775 #else
5776 NULL,
5777 #endif
5778 #ifdef USE_THREAD_DB
5779 thread_db_get_tls_address,
5780 #else
5781 NULL,
5782 #endif
5783 linux_qxfer_spu,
5784 hostio_last_error_from_errno,
5785 linux_qxfer_osdata,
5786 linux_xfer_siginfo,
5787 linux_supports_non_stop,
5788 linux_async,
5789 linux_start_non_stop,
5790 linux_supports_multi_process,
5791 #ifdef USE_THREAD_DB
5792 thread_db_handle_monitor_command,
5793 #else
5794 NULL,
5795 #endif
5796 linux_common_core_of_thread,
5797 linux_read_loadmap,
5798 linux_process_qsupported,
5799 linux_supports_tracepoints,
5800 linux_read_pc,
5801 linux_write_pc,
5802 linux_thread_stopped,
5803 NULL,
5804 linux_pause_all,
5805 linux_unpause_all,
5806 linux_cancel_breakpoints,
5807 linux_stabilize_threads,
5808 linux_install_fast_tracepoint_jump_pad,
5809 linux_emit_ops,
5810 linux_supports_disable_randomization,
5811 linux_get_min_fast_tracepoint_insn_len,
5812 linux_qxfer_libraries_svr4,
5813 linux_supports_agent,
5814 #ifdef HAVE_LINUX_BTRACE
5815 linux_supports_btrace,
5816 linux_low_enable_btrace,
5817 linux_disable_btrace,
5818 linux_low_read_btrace,
5819 #else
5820 NULL,
5821 NULL,
5822 NULL,
5823 NULL,
5824 #endif
5825 linux_supports_range_stepping,
5826 };
5827
5828 static void
5829 linux_init_signals ()
5830 {
5831 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5832 to find what the cancel signal actually is. */
5833 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5834 signal (__SIGRTMIN+1, SIG_IGN);
5835 #endif
5836 }
5837
5838 #ifdef HAVE_LINUX_REGSETS
5839 void
5840 initialize_regsets_info (struct regsets_info *info)
5841 {
5842 for (info->num_regsets = 0;
5843 info->regsets[info->num_regsets].size >= 0;
5844 info->num_regsets++)
5845 ;
5846 }
5847 #endif
5848
5849 void
5850 initialize_low (void)
5851 {
5852 struct sigaction sigchld_action;
5853 memset (&sigchld_action, 0, sizeof (sigchld_action));
5854 set_target_ops (&linux_target_ops);
5855 set_breakpoint_data (the_low_target.breakpoint,
5856 the_low_target.breakpoint_len);
5857 linux_init_signals ();
5858 linux_ptrace_init_warnings ();
5859
5860 sigchld_action.sa_handler = sigchld_handler;
5861 sigemptyset (&sigchld_action.sa_mask);
5862 sigchld_action.sa_flags = SA_RESTART;
5863 sigaction (SIGCHLD, &sigchld_action, NULL);
5864
5865 initialize_low_arch ();
5866 }