/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <stdio.h>
28 #include <sys/ptrace.h>
29 #include "linux-ptrace.h"
30 #include "linux-procfs.h"
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <string.h>
35 #include <stdlib.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/syscall.h>
39 #include <sched.h>
40 #include <ctype.h>
41 #include <pwd.h>
42 #include <sys/types.h>
43 #include <dirent.h>
44 #include <sys/stat.h>
45 #include <sys/vfs.h>
46 #include <sys/uio.h>
47 #include "filestuff.h"
48 #include "tracepoint.h"
49 #include "hostio.h"
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 #ifndef W_STOPCODE
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75 #endif
76
77 /* This is the kernel's hard limit. Not to be confused with
78 SIGRTMIN. */
79 #ifndef __SIGRTMIN
80 #define __SIGRTMIN 32
81 #endif
82
83 /* Some targets did not define these ptrace constants from the start,
84 so gdbserver defines them locally here. In the future, these may
85 be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87 || defined(PT_DATA_ADDR) \
88 || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
95 #elif defined(BFIN)
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
104 #endif
105 #endif
106
107 #ifdef HAVE_LINUX_BTRACE
108 # include "linux-btrace.h"
109 #endif
110
111 #ifndef HAVE_ELF32_AUXV_T
112 /* Copied from glibc's elf.h. */
113 typedef struct
114 {
115 uint32_t a_type; /* Entry type */
116 union
117 {
118 uint32_t a_val; /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
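
/* Usage sketch (cross-reference only): when handle_extended_wait below
   learns of a clone child NEW_PID, it calls
   pull_pid_from_list (&stopped_pids, new_pid, &status) to check whether
   linux_wait_for_lwp already saw the child's initial stop and stashed
   it here via add_to_pid_list; if not, it waits for the stop itself.  */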

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF header at all.  Store
   the ELF machine type in *MACHINE on success, EM_NONE otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

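/* Remove LWP from both the thread list and the LWP list, and free its
   storage, including any architecture-specific data.  */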
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->entry);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads != NOT_STOPPING_THREADS)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads != NOT_STOPPING_THREADS)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

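/* Allocate and zero-initialize an lwp_info for PTID, set up its
   architecture-specific data if the target provides any, link it into
   the LWP list, and return it.  */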
static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->entry.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->entry);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning ("Error disabling address space randomization: %s",
                 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning ("Error restoring address space randomization: %s",
                 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    {
      struct buffer buffer;

      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
             lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
        {
          fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
          fflush (stderr);
        }
      else
        {
          /* At this point we attached to the tgid.  Scan the task for
             existing threads.  */
          unsigned long lwp;
          int new_threads_found;
          int iterations = 0;
          struct dirent *dp;

          while (iterations < 2)
            {
              new_threads_found = 0;
              /* Add all the other threads.  While we go through the
                 threads, new threads may be spawned.  Cycle through
                 the list of threads until we have done two iterations without
                 finding new threads.  */
              while ((dp = readdir (dir)) != NULL)
                {
                  /* Fetch one lwp.  */
                  lwp = strtoul (dp->d_name, NULL, 10);

                  /* Is this a new thread?  */
                  if (lwp
                      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
                    {
                      linux_attach_lwp_1 (lwp, 0);
                      new_threads_found++;

                      if (debug_threads)
                        debug_printf ("Found and attached to new lwp %ld\n",
                                      lwp);
                    }
                }

              if (!new_threads_found)
                iterations++;
              else
                iterations = 0;

              rewinddir (dir);
            }
          closedir (dir);
        }
    }

  return 0;
}

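/* Accumulator used with the find_inferior callbacks below to count
   the threads that belong to a given pid.  */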
struct counter
{
  int pid;
  int count;
};

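/* Callback for find_inferior: bump COUNTER->count for each thread of
   COUNTER->pid seen, and return non-zero once a second one is
   found.  */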
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

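/* Return non-zero if THREAD is the only remaining thread of its
   process.  */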
static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = thread->entry.id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    debug_printf ("LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
                  target_pid_to_str (ptid_of (lwp)),
                  errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                  target_pid_to_str (ptid_of (lwp)),
                  errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->entry.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

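/* Kill process PID and every LWP that belongs to it.  */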
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in kill_one_lwp_callback.  We did not kill the
     first thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    {
      if (debug_threads)
        debug_printf ("lk_1: killing lwp %ld, for pid: %d\n",
                      lwpid_of (lwp), pid);

      do
        {
          linux_kill_one_lwp (lwp);

          /* Make sure it died.  The loop is most likely unnecessary.  */
          lwpid = linux_wait_for_event (lwp->entry.id, &wstat, __WALL);
        } while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (lp)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (lp)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (lp)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

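/* Callback for find_inferior: detach from the LWP in ENTRY if it
   belongs to the process whose pid ARGS points to, delivering any
   pending signal and deleting the LWP afterwards.  */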
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (lwp)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

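/* Detach from process PID, first detaching from each of its LWPs.  */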
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

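/* Clean up after PROCESS has gone away: delete all of its LWPs, free
   its private data, and remove it from the process list.  */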
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

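/* Block until process PID exits or disappears, i.e. until waitpid
   reports its exit status or fails with ECHILD.  */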
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when the wait actually succeeded; it is
       uninitialized when my_waitpid returns -1.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->entry.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

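/* Callback for find_inferior: match ENTRY against the LWP id encoded
   in DATA, falling back to the pid when the ptid carries no LWP
   id.  */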
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    debug_printf ("linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    debug_printf ("Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  if (WIFSTOPPED (*wstatp))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
         needs to happen after we have attached to the inferior and it
         is stopped for the first time, but before we access any
         inferior registers.  */
      proc = find_process_pid (pid_of (child));
      if (proc->private->new_inferior)
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          the_low_target.arch_setup ();

          current_inferior = saved_inferior;

          proc->private->new_inferior = 0;
        }
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      debug_printf ("linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* We resume in the caller because we want to be able to pass
   lwp->status_pending as WSTAT, and we need to clear status_pending_p
   before resuming; otherwise, linux_resume_one_lwp refuses to
   resume.  */
1478
1479 static int
1480 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1481 {
1482 struct thread_info *saved_inferior;
1483
1484 saved_inferior = current_inferior;
1485 current_inferior = get_lwp_thread (lwp);
1486
1487 if ((wstat == NULL
1488 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1489 && supports_fast_tracepoints ()
1490 && agent_loaded_p ())
1491 {
1492 struct fast_tpoint_collect_status status;
1493 int r;
1494
1495 if (debug_threads)
1496 debug_printf ("Checking whether LWP %ld needs to move out of the "
1497 "jump pad.\n",
1498 lwpid_of (lwp));
1499
1500 r = linux_fast_tracepoint_collecting (lwp, &status);
1501
1502 if (wstat == NULL
1503 || (WSTOPSIG (*wstat) != SIGILL
1504 && WSTOPSIG (*wstat) != SIGFPE
1505 && WSTOPSIG (*wstat) != SIGSEGV
1506 && WSTOPSIG (*wstat) != SIGBUS))
1507 {
1508 lwp->collecting_fast_tracepoint = r;
1509
1510 if (r != 0)
1511 {
1512 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1513 {
1514 /* Haven't executed the original instruction yet.
1515 Set breakpoint there, and wait till it's hit,
1516 then single-step until exiting the jump pad. */
1517 lwp->exit_jump_pad_bkpt
1518 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1519 }
1520
1521 if (debug_threads)
1522 debug_printf ("Checking whether LWP %ld needs to move out of "
1523 "the jump pad...it does\n",
1524 lwpid_of (lwp));
1525 current_inferior = saved_inferior;
1526
1527 return 1;
1528 }
1529 }
1530 else
1531 {
1532 /* If we get a synchronous signal while collecting, *and*
1533 while executing the (relocated) original instruction,
1534 reset the PC to point at the tpoint address, before
1535 reporting to GDB. Otherwise, it's an IPA lib bug: just
1536 report the signal to GDB, and pray for the best. */
1537
1538 lwp->collecting_fast_tracepoint = 0;
1539
1540 if (r != 0
1541 && (status.adjusted_insn_addr <= lwp->stop_pc
1542 && lwp->stop_pc < status.adjusted_insn_addr_end))
1543 {
1544 siginfo_t info;
1545 struct regcache *regcache;
1546
1547 /* The si_addr on a few signals references the address
1548 of the faulting instruction. Adjust that as
1549 well. */
1550 if ((WSTOPSIG (*wstat) == SIGILL
1551 || WSTOPSIG (*wstat) == SIGFPE
1552 || WSTOPSIG (*wstat) == SIGBUS
1553 || WSTOPSIG (*wstat) == SIGSEGV)
1554 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp),
1555 (PTRACE_TYPE_ARG3) 0, &info) == 0
1556 /* Final check just to make sure we don't clobber
1557 the siginfo of non-kernel-sent signals. */
1558 && (uintptr_t) info.si_addr == lwp->stop_pc)
1559 {
1560 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1561 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp),
1562 (PTRACE_TYPE_ARG3) 0, &info);
1563 }
1564
1565 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1566 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1567 lwp->stop_pc = status.tpoint_addr;
1568
1569 /* Cancel any fast tracepoint lock this thread was
1570 holding. */
1571 force_unlock_trace_buffer ();
1572 }
1573
1574 if (lwp->exit_jump_pad_bkpt != NULL)
1575 {
1576 if (debug_threads)
1577 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1578 "stopping all threads momentarily.\n");
1579
1580 stop_all_lwps (1, lwp);
1581 cancel_breakpoints ();
1582
1583 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1584 lwp->exit_jump_pad_bkpt = NULL;
1585
1586 unstop_all_lwps (1, lwp);
1587
1588 gdb_assert (lwp->suspended >= 0);
1589 }
1590 }
1591 }
1592
1593 if (debug_threads)
1594 debug_printf ("Checking whether LWP %ld needs to move out of the "
1595 "jump pad...no\n",
1596 lwpid_of (lwp));
1597
1598 current_inferior = saved_inferior;
1599 return 0;
1600 }
1601
1602 /* Enqueue one signal in the "signals to report later when out of the
1603 jump pad" list. */
1604
1605 static void
1606 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1607 {
1608 struct pending_signals *p_sig;
1609
1610 if (debug_threads)
1611 debug_printf ("Deferring signal %d for LWP %ld.\n",
1612 WSTOPSIG (*wstat), lwpid_of (lwp));
1613
1614 if (debug_threads)
1615 {
1616 struct pending_signals *sig;
1617
1618 for (sig = lwp->pending_signals_to_report;
1619 sig != NULL;
1620 sig = sig->prev)
1621 debug_printf (" Already queued %d\n",
1622 sig->signal);
1623
1624 debug_printf (" (no more currently queued signals)\n");
1625 }
1626
1627 /* Don't enqueue non-RT signals if they are already in the deferred
1628 queue. (SIGSTOP being the easiest signal to see ending up here
1629 twice) */
1630 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1631 {
1632 struct pending_signals *sig;
1633
1634 for (sig = lwp->pending_signals_to_report;
1635 sig != NULL;
1636 sig = sig->prev)
1637 {
1638 if (sig->signal == WSTOPSIG (*wstat))
1639 {
1640 if (debug_threads)
1641 debug_printf ("Not requeuing already queued non-RT signal %d"
1642 " for LWP %ld\n",
1643 sig->signal,
1644 lwpid_of (lwp));
1645 return;
1646 }
1647 }
1648 }
1649
1650 p_sig = xmalloc (sizeof (*p_sig));
1651 p_sig->prev = lwp->pending_signals_to_report;
1652 p_sig->signal = WSTOPSIG (*wstat);
1653 memset (&p_sig->info, 0, sizeof (siginfo_t));
1654 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1655 &p_sig->info);
1656
1657 lwp->pending_signals_to_report = p_sig;
1658 }
1659
1660 /* Dequeue one signal from the "signals to report later when out of
1661 the jump pad" list. */
1662
1663 static int
1664 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1665 {
1666 if (lwp->pending_signals_to_report != NULL)
1667 {
1668 struct pending_signals **p_sig;
1669
1670 p_sig = &lwp->pending_signals_to_report;
1671 while ((*p_sig)->prev != NULL)
1672 p_sig = &(*p_sig)->prev;
1673
1674 *wstat = W_STOPCODE ((*p_sig)->signal);
1675 if ((*p_sig)->info.si_signo != 0)
1676 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
1677 &(*p_sig)->info);
1678 free (*p_sig);
1679 *p_sig = NULL;
1680
1681 if (debug_threads)
1682 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1683 WSTOPSIG (*wstat), lwpid_of (lwp));
1684
1685 if (debug_threads)
1686 {
1687 struct pending_signals *sig;
1688
1689 for (sig = lwp->pending_signals_to_report;
1690 sig != NULL;
1691 sig = sig->prev)
1692 debug_printf (" Still queued %d\n",
1693 sig->signal);
1694
1695 debug_printf (" (no more queued signals)\n");
1696 }
1697
1698 return 1;
1699 }
1700
1701 return 0;
1702 }
1703
1704 /* Arrange for a breakpoint to be hit again later. We don't keep the
1705 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1706 will handle the current event, eventually we will resume this LWP,
1707 and this breakpoint will trap again. */
1708
1709 static int
1710 cancel_breakpoint (struct lwp_info *lwp)
1711 {
1712 struct thread_info *saved_inferior;
1713
1714 /* There's nothing to do if we don't support breakpoints. */
1715 if (!supports_breakpoints ())
1716 return 0;
1717
1718 /* breakpoint_at reads from current inferior. */
1719 saved_inferior = current_inferior;
1720 current_inferior = get_lwp_thread (lwp);
1721
1722 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1723 {
1724 if (debug_threads)
1725 debug_printf ("CB: Push back breakpoint for %s\n",
1726 target_pid_to_str (ptid_of (lwp)));
1727
1728 /* Back up the PC if necessary. */
1729 if (the_low_target.decr_pc_after_break)
1730 {
1731 struct regcache *regcache
1732 = get_thread_regcache (current_inferior, 1);
1733 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1734 }
1735
1736 current_inferior = saved_inferior;
1737 return 1;
1738 }
1739 else
1740 {
1741 if (debug_threads)
1742 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1743 paddress (lwp->stop_pc),
1744 target_pid_to_str (ptid_of (lwp)));
1745 }
1746
1747 current_inferior = saved_inferior;
1748 return 0;
1749 }
1750
1751 /* When the event-loop is doing a step-over, this points at the thread
1752 being stepped. */
1753 ptid_t step_over_bkpt;
1754
1755 /* Wait for an event from child PID. If PID is -1, wait for any
1756 child. Store the stop status through the status pointer WSTAT.
1757 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1758 event was found and OPTIONS contains WNOHANG. Return the PID of
1759 the stopped child and update current_inferior otherwise. */
1760
1761 static int
1762 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1763 {
1764 struct lwp_info *event_child, *requested_child;
1765 ptid_t wait_ptid;
1766
1767 event_child = NULL;
1768 requested_child = NULL;
1769
1770 /* Check for a lwp with a pending status. */
1771
1772 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1773 {
1774 event_child = (struct lwp_info *)
1775 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1776 if (debug_threads && event_child)
1777 debug_printf ("Got a pending child %ld\n", lwpid_of (event_child));
1778 }
1779 else
1780 {
1781 requested_child = find_lwp_pid (ptid);
1782
1783 if (stopping_threads == NOT_STOPPING_THREADS
1784 && requested_child->status_pending_p
1785 && requested_child->collecting_fast_tracepoint)
1786 {
1787 enqueue_one_deferred_signal (requested_child,
1788 &requested_child->status_pending);
1789 requested_child->status_pending_p = 0;
1790 requested_child->status_pending = 0;
1791 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1792 }
1793
1794 if (requested_child->suspended
1795 && requested_child->status_pending_p)
1796 fatal ("requesting an event out of a suspended child?");
1797
1798 if (requested_child->status_pending_p)
1799 event_child = requested_child;
1800 }
1801
1802 if (event_child != NULL)
1803 {
1804 if (debug_threads)
1805 debug_printf ("Got an event from pending child %ld (%04x)\n",
1806 lwpid_of (event_child), event_child->status_pending);
1807 *wstat = event_child->status_pending;
1808 event_child->status_pending_p = 0;
1809 event_child->status_pending = 0;
1810 current_inferior = get_lwp_thread (event_child);
1811 return lwpid_of (event_child);
1812 }
1813
1814 if (ptid_is_pid (ptid))
1815 {
1816 /* A request to wait for a specific tgid. This is not possible
1817 with waitpid, so instead, we wait for any child, and leave
1818 children we're not interested in right now with a pending
1819 status to report later. */
1820 wait_ptid = minus_one_ptid;
1821 }
1822 else
1823 wait_ptid = ptid;
1824
1825 /* We only enter this loop if no process has a pending wait status. Thus
1826 any action taken in response to a wait status inside this loop is
1827 responding as soon as we detect the status, not after any pending
1828 events. */
1829 while (1)
1830 {
1831 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1832
1833 if ((options & WNOHANG) && event_child == NULL)
1834 {
1835 if (debug_threads)
1836 debug_printf ("WNOHANG set, no event found\n");
1837 return 0;
1838 }
1839
1840 if (event_child == NULL)
1841 error ("event from unknown child");
1842
1843 if (ptid_is_pid (ptid)
1844 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1845 {
1846 if (! WIFSTOPPED (*wstat))
1847 mark_lwp_dead (event_child, *wstat);
1848 else
1849 {
1850 event_child->status_pending_p = 1;
1851 event_child->status_pending = *wstat;
1852 }
1853 continue;
1854 }
1855
1856 current_inferior = get_lwp_thread (event_child);
1857
1858 /* Check for thread exit. */
1859 if (! WIFSTOPPED (*wstat))
1860 {
1861 if (debug_threads)
1862 debug_printf ("LWP %ld exiting\n", lwpid_of (event_child));
1863
1864 /* If the last thread is exiting, just return. */
1865 if (last_thread_of_process_p (current_inferior))
1866 {
1867 if (debug_threads)
1868 debug_printf ("LWP %ld is last lwp of process\n",
1869 lwpid_of (event_child));
1870 return lwpid_of (event_child);
1871 }
1872
1873 if (!non_stop)
1874 {
1875 current_inferior = get_first_thread ();
1876 if (debug_threads)
1877 debug_printf ("Current inferior is now %ld\n",
1878 lwpid_of (get_thread_lwp (current_inferior)));
1879 }
1880 else
1881 {
1882 current_inferior = NULL;
1883 if (debug_threads)
1884 debug_printf ("Current inferior is now <NULL>\n");
1885 }
1886
1887 /* If we were waiting for this particular child to do something...
1888 well, it did something. */
1889 if (requested_child != NULL)
1890 {
1891 int lwpid = lwpid_of (event_child);
1892
1893 /* Cancel the step-over operation --- the thread that
1894 started it is gone. */
1895 if (finish_step_over (event_child))
1896 unstop_all_lwps (1, event_child);
1897 delete_lwp (event_child);
1898 return lwpid;
1899 }
1900
1901 delete_lwp (event_child);
1902
1903 /* Wait for a more interesting event. */
1904 continue;
1905 }
1906
1907 if (event_child->must_set_ptrace_flags)
1908 {
1909 linux_enable_event_reporting (lwpid_of (event_child));
1910 event_child->must_set_ptrace_flags = 0;
1911 }
1912
1913 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1914 && *wstat >> 16 != 0)
1915 {
1916 handle_extended_wait (event_child, *wstat);
1917 continue;
1918 }
1919
1920 if (WIFSTOPPED (*wstat)
1921 && WSTOPSIG (*wstat) == SIGSTOP
1922 && event_child->stop_expected)
1923 {
1924 int should_stop;
1925
1926 if (debug_threads)
1927 debug_printf ("Expected stop.\n");
1928 event_child->stop_expected = 0;
1929
1930 should_stop = (current_inferior->last_resume_kind == resume_stop
1931 || stopping_threads != NOT_STOPPING_THREADS);
1932
1933 if (!should_stop)
1934 {
1935 linux_resume_one_lwp (event_child,
1936 event_child->stepping, 0, NULL);
1937 continue;
1938 }
1939 }
1940
1941 return lwpid_of (event_child);
1942 }
1943
1944 /* NOTREACHED */
1945 return 0;
1946 }
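
/* A self-contained sketch of how callers decode the raw wait status
   WSTAT filled in above, using the standard <sys/wait.h> macros plus
   the W_STOPCODE fallback defined at the top of this file.  Kept
   disabled; it is illustration only, not part of gdbserver.  */
#if 0
static void
decode_wstat_sketch (int wstat)
{
  if (WIFEXITED (wstat))
    debug_printf ("exited normally, status %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    debug_printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    /* E.g. a synthetic W_STOPCODE (SIGSTOP) takes this branch.  */
    debug_printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif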
1947
1948 /* Count the LWPs that have had events. */
1949
1950 static int
1951 count_events_callback (struct inferior_list_entry *entry, void *data)
1952 {
1953 struct lwp_info *lp = (struct lwp_info *) entry;
1954 struct thread_info *thread = get_lwp_thread (lp);
1955 int *count = data;
1956
1957 gdb_assert (count != NULL);
1958
1959 /* Count only resumed LWPs that have a SIGTRAP event pending that
1960 should be reported to GDB. */
1961 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1962 && thread->last_resume_kind != resume_stop
1963 && lp->status_pending_p
1964 && WIFSTOPPED (lp->status_pending)
1965 && WSTOPSIG (lp->status_pending) == SIGTRAP
1966 && !breakpoint_inserted_here (lp->stop_pc))
1967 (*count)++;
1968
1969 return 0;
1970 }
1971
1972 /* Select the LWP (if any) that is currently being single-stepped. */
1973
1974 static int
1975 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1976 {
1977 struct lwp_info *lp = (struct lwp_info *) entry;
1978 struct thread_info *thread = get_lwp_thread (lp);
1979
1980 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1981 && thread->last_resume_kind == resume_step
1982 && lp->status_pending_p)
1983 return 1;
1984 else
1985 return 0;
1986 }
1987
1988 /* Select the Nth LWP that has had a SIGTRAP event that should be
1989 reported to GDB. */
1990
1991 static int
1992 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1993 {
1994 struct lwp_info *lp = (struct lwp_info *) entry;
1995 struct thread_info *thread = get_lwp_thread (lp);
1996 int *selector = data;
1997
1998 gdb_assert (selector != NULL);
1999
2000 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2001 if (thread->last_resume_kind != resume_stop
2002 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2003 && lp->status_pending_p
2004 && WIFSTOPPED (lp->status_pending)
2005 && WSTOPSIG (lp->status_pending) == SIGTRAP
2006 && !breakpoint_inserted_here (lp->stop_pc))
2007 if ((*selector)-- == 0)
2008 return 1;
2009
2010 return 0;
2011 }
2012
2013 static int
2014 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2015 {
2016 struct lwp_info *lp = (struct lwp_info *) entry;
2017 struct thread_info *thread = get_lwp_thread (lp);
2018 struct lwp_info *event_lp = data;
2019
2020 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2021 if (lp == event_lp)
2022 return 0;
2023
2024 /* If a LWP other than the LWP that we're reporting an event for has
2025 hit a GDB breakpoint (as opposed to some random trap signal),
2026 then just arrange for it to hit it again later. We don't keep
2027 the SIGTRAP status and don't forward the SIGTRAP signal to the
2028 LWP. We will handle the current event, eventually we will resume
2029 all LWPs, and this one will get its breakpoint trap again.
2030
2031 If we do not do this, then we run the risk that the user will
2032 delete or disable the breakpoint, but the LWP will have already
2033 tripped on it. */
2034
2035 if (thread->last_resume_kind != resume_stop
2036 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2037 && lp->status_pending_p
2038 && WIFSTOPPED (lp->status_pending)
2039 && WSTOPSIG (lp->status_pending) == SIGTRAP
2040 && !lp->stepping
2041 && !lp->stopped_by_watchpoint
2042 && cancel_breakpoint (lp))
2043 /* Throw away the SIGTRAP. */
2044 lp->status_pending_p = 0;
2045
2046 return 0;
2047 }
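
/* An illustrative sketch (disabled) of what "arrange for it to hit
   it again later" amounts to.  cancel_breakpoint, defined earlier in
   this file, rewinds the LWP's PC back onto the breakpoint address,
   so the trap re-fires once the LWP is eventually resumed; roughly:  */
#if 0
static void
retrap_later_sketch (struct lwp_info *lwp)
{
  struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);

  /* LWP->STOP_PC was adjusted back to the breakpoint address when
     the SIGTRAP was processed; putting the PC there again makes the
     LWP re-execute the breakpoint instruction on resume.  */
  (*the_low_target.set_pc) (regcache, lwp->stop_pc);
}
#endif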
2048
2049 static void
2050 linux_cancel_breakpoints (void)
2051 {
2052 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2053 }
2054
2055 /* Select one LWP out of those that have events pending. */
2056
2057 static void
2058 select_event_lwp (struct lwp_info **orig_lp)
2059 {
2060 int num_events = 0;
2061 int random_selector;
2062 struct lwp_info *event_lp;
2063
2064 /* Give preference to any LWP that is being single-stepped. */
2065 event_lp
2066 = (struct lwp_info *) find_inferior (&all_lwps,
2067 select_singlestep_lwp_callback, NULL);
2068 if (event_lp != NULL)
2069 {
2070 if (debug_threads)
2071 debug_printf ("SEL: Select single-step %s\n",
2072 target_pid_to_str (ptid_of (event_lp)));
2073 }
2074 else
2075 {
2076 /* No single-stepping LWP. Select one at random, out of those
2077 which have had SIGTRAP events. */
2078
2079 /* First see how many SIGTRAP events we have. */
2080 find_inferior (&all_lwps, count_events_callback, &num_events);
2081
2082 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2083 random_selector = (int)
2084 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2085
2086 if (debug_threads && num_events > 1)
2087 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2088 num_events, random_selector);
2089
2090 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2091 select_event_lwp_callback,
2092 &random_selector);
2093 }
2094
2095 if (event_lp != NULL)
2096 {
2097 /* Switch the event LWP. */
2098 *orig_lp = event_lp;
2099 }
2100 }
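
/* A standalone sketch of the selection arithmetic above (rand and
   RAND_MAX come from <stdlib.h>, already included): scaling rand ()
   into [0, N) through a double avoids the slight bias that
   `rand () % N' has whenever RAND_MAX + 1 is not a multiple of N.  */
#if 0
static int
pick_uniform_sketch (int n)
{
  /* rand () / (RAND_MAX + 1.0) is (approximately) uniform in [0, 1),
     so scaling by N and truncating yields each of 0 .. N-1 with
     nearly equal probability.  */
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif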
2101
2102 /* Decrement the suspend count of an LWP. */
2103
2104 static int
2105 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2106 {
2107 struct lwp_info *lwp = (struct lwp_info *) entry;
2108
2109 /* Ignore EXCEPT. */
2110 if (lwp == except)
2111 return 0;
2112
2113 lwp->suspended--;
2114
2115 gdb_assert (lwp->suspended >= 0);
2116 return 0;
2117 }
2118
2119 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2120 non-NULL. */
2121
2122 static void
2123 unsuspend_all_lwps (struct lwp_info *except)
2124 {
2125 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2126 }
2127
2128 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2129 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2130 void *data);
2131 static int lwp_running (struct inferior_list_entry *entry, void *data);
2132 static ptid_t linux_wait_1 (ptid_t ptid,
2133 struct target_waitstatus *ourstatus,
2134 int target_options);
2135
2136 /* Stabilize threads (move them out of jump pads).
2137
2138 If a thread is midway through collecting a fast tracepoint, we
2139 need to finish the collection and move it out of the jump pad
2140 before reporting the signal.
2141
2142 This avoids recursion while collecting (when a signal arrives
2143 midway, and the signal handler itself collects), which would trash
2144 the trace buffer.  In case the user set a breakpoint in a signal
2145 handler, this also avoids the backtrace showing the jump pad, etc.
2146 Most importantly, there are certain things we can't do safely if
2147 threads are stopped in a jump pad (or in its callees).  For
2148 example:
2149
2150 - starting a new trace run.  A thread still collecting the
2151 previous run could trash the trace buffer when resumed.  The trace
2152 buffer control structures would have been reset, but the thread
2153 would have no way to tell.  The thread could even be midway through
2154 memcpy'ing into the buffer, in which case, when resumed, it would
2155 clobber the trace buffer that had been set up for the new run.
2156
2157 - we can't rewrite/reuse the jump pads for new tracepoints
2158 safely.  Say you do tstart while a thread is stopped midway through
2159 collecting.  When the thread is later resumed, it finishes the
2160 collection, and returns to the jump pad, to execute the original
2161 instruction that was under the tracepoint jump at the time the
2162 older run was started.  If the jump pad has since been rewritten
2163 for something else in the new run, the thread would now execute
2164 the wrong / random instructions. */
2165
2166 static void
2167 linux_stabilize_threads (void)
2168 {
2169 struct thread_info *save_inferior;
2170 struct lwp_info *lwp_stuck;
2171
2172 lwp_stuck
2173 = (struct lwp_info *) find_inferior (&all_lwps,
2174 stuck_in_jump_pad_callback, NULL);
2175 if (lwp_stuck != NULL)
2176 {
2177 if (debug_threads)
2178 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2179 lwpid_of (lwp_stuck));
2180 return;
2181 }
2182
2183 save_inferior = current_inferior;
2184
2185 stabilizing_threads = 1;
2186
2187 /* Kick 'em all. */
2188 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2189
2190 /* Loop until all are stopped out of the jump pads. */
2191 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2192 {
2193 struct target_waitstatus ourstatus;
2194 struct lwp_info *lwp;
2195 int wstat;
2196
2197 /* Note that we go through the full wait event loop.  While
2198 moving threads out of the jump pads, we need to be able to
2199 step over internal breakpoints and such. */
2200 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2201
2202 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2203 {
2204 lwp = get_thread_lwp (current_inferior);
2205
2206 /* Lock it. */
2207 lwp->suspended++;
2208
2209 if (ourstatus.value.sig != GDB_SIGNAL_0
2210 || current_inferior->last_resume_kind == resume_stop)
2211 {
2212 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2213 enqueue_one_deferred_signal (lwp, &wstat);
2214 }
2215 }
2216 }
2217
2218 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2219
2220 stabilizing_threads = 0;
2221
2222 current_inferior = save_inferior;
2223
2224 if (debug_threads)
2225 {
2226 lwp_stuck
2227 = (struct lwp_info *) find_inferior (&all_lwps,
2228 stuck_in_jump_pad_callback, NULL);
2229 if (lwp_stuck != NULL)
2230 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2231 lwpid_of (lwp_stuck));
2232 }
2233 }
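
/* A tiny sketch of the synthetic wait status construction used in
   the stabilization loop above: W_STOPCODE builds a value for which
   WIFSTOPPED is true and WSTOPSIG recovers the signal, so a deferred
   signal can be re-queued as if waitpid had just reported it.  */
#if 0
#include <assert.h>

static void
w_stopcode_roundtrip_sketch (int sig)
{
  int wstat = W_STOPCODE (sig);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == sig);
}
#endif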
2234
2235 /* Wait for an event from any process matching PTID; fill in OURSTATUS and return the ptid of the LWP the event is for. */
2236
2237 static ptid_t
2238 linux_wait_1 (ptid_t ptid,
2239 struct target_waitstatus *ourstatus, int target_options)
2240 {
2241 int w;
2242 struct lwp_info *event_child;
2243 int options;
2244 int pid;
2245 int step_over_finished;
2246 int bp_explains_trap;
2247 int maybe_internal_trap;
2248 int report_to_gdb;
2249 int trace_event;
2250 int in_step_range;
2251
2252 if (debug_threads)
2253 {
2254 debug_enter ();
2255 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2256 }
2257
2258 /* Translate generic target options into linux options. */
2259 options = __WALL;
2260 if (target_options & TARGET_WNOHANG)
2261 options |= WNOHANG;
2262
2263 retry:
2264 bp_explains_trap = 0;
2265 trace_event = 0;
2266 in_step_range = 0;
2267 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2268
2269 /* If we were only supposed to resume one thread, only wait for
2270 that thread - if it's still alive. If it died, however - which
2271 can happen if we're coming from the thread death case below -
2272 then we need to make sure we restart the other threads. We could
2273 pick a thread at random or restart all; restarting all is less
2274 arbitrary. */
2275 if (!non_stop
2276 && !ptid_equal (cont_thread, null_ptid)
2277 && !ptid_equal (cont_thread, minus_one_ptid))
2278 {
2279 struct thread_info *thread;
2280
2281 thread = (struct thread_info *) find_inferior_id (&all_threads,
2282 cont_thread);
2283
2284 /* No stepping, no signal - unless one is pending already, of course. */
2285 if (thread == NULL)
2286 {
2287 struct thread_resume resume_info;
2288 resume_info.thread = minus_one_ptid;
2289 resume_info.kind = resume_continue;
2290 resume_info.sig = 0;
2291 linux_resume (&resume_info, 1);
2292 }
2293 else
2294 ptid = cont_thread;
2295 }
2296
2297 if (ptid_equal (step_over_bkpt, null_ptid))
2298 pid = linux_wait_for_event (ptid, &w, options);
2299 else
2300 {
2301 if (debug_threads)
2302 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2303 target_pid_to_str (step_over_bkpt));
2304 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2305 }
2306
2307 if (pid == 0) /* only if TARGET_WNOHANG */
2308 {
2309 if (debug_threads)
2310 {
2311 debug_printf ("linux_wait_1 ret = null_ptid\n");
2312 debug_exit ();
2313 }
2314 return null_ptid;
2315 }
2316
2317 event_child = get_thread_lwp (current_inferior);
2318
2319 /* If we are waiting for a particular child, and it exited,
2320 linux_wait_for_event will return its exit status. Similarly if
2321 the last child exited. If this is not the last child, however,
2322 do not report it as exited until there is a 'thread exited' response
2323 available in the remote protocol. Instead, just wait for another event.
2324 This should be safe, because if the thread crashed we will already
2325 have reported the termination signal to GDB; that should stop any
2326 in-progress stepping operations, etc.
2327
2328 Report the exit status of the last thread to exit. This matches
2329 LinuxThreads' behavior. */
2330
2331 if (last_thread_of_process_p (current_inferior))
2332 {
2333 if (WIFEXITED (w) || WIFSIGNALED (w))
2334 {
2335 if (WIFEXITED (w))
2336 {
2337 ourstatus->kind = TARGET_WAITKIND_EXITED;
2338 ourstatus->value.integer = WEXITSTATUS (w);
2339
2340 if (debug_threads)
2341 {
2342 debug_printf ("linux_wait_1 ret = %s, exited with "
2343 "retcode %d\n",
2344 target_pid_to_str (ptid_of (event_child)),
2345 WEXITSTATUS (w));
2346 debug_exit ();
2347 }
2348 }
2349 else
2350 {
2351 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2352 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2353
2354 if (debug_threads)
2355 {
2356 debug_printf ("linux_wait_1 ret = %s, terminated with "
2357 "signal %d\n",
2358 target_pid_to_str (ptid_of (event_child)),
2359 WTERMSIG (w));
2360 debug_exit ();
2361 }
2362 }
2363
2364 return ptid_of (event_child);
2365 }
2366 }
2367 else
2368 {
2369 if (!WIFSTOPPED (w))
2370 goto retry;
2371 }
2372
2373 /* If this event was not handled before, and is not a SIGTRAP, we
2374 report it. SIGILL and SIGSEGV are also treated as traps in case
2375 a breakpoint is inserted at the current PC. If this target does
2376 not support internal breakpoints at all, we also report the
2377 SIGTRAP without further processing; it's of no concern to us. */
2378 maybe_internal_trap
2379 = (supports_breakpoints ()
2380 && (WSTOPSIG (w) == SIGTRAP
2381 || ((WSTOPSIG (w) == SIGILL
2382 || WSTOPSIG (w) == SIGSEGV)
2383 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2384
2385 if (maybe_internal_trap)
2386 {
2387 /* Handle anything that requires bookkeeping before deciding to
2388 report the event or continue waiting. */
2389
2390 /* First check if we can explain the SIGTRAP with an internal
2391 breakpoint, or if we should possibly report the event to GDB.
2392 Do this before anything that may remove or insert a
2393 breakpoint. */
2394 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2395
2396 /* We have a SIGTRAP, possibly a step-over dance has just
2397 finished. If so, tweak the state machine accordingly,
2398 reinsert breakpoints and delete any reinsert (software
2399 single-step) breakpoints. */
2400 step_over_finished = finish_step_over (event_child);
2401
2402 /* Now invoke the callbacks of any internal breakpoints there. */
2403 check_breakpoints (event_child->stop_pc);
2404
2405 /* Handle tracepoint data collecting. This may overflow the
2406 trace buffer, and cause a tracing stop, removing
2407 breakpoints. */
2408 trace_event = handle_tracepoints (event_child);
2409
2410 if (bp_explains_trap)
2411 {
2412 /* If we stepped or ran into an internal breakpoint, we've
2413 already handled it. So next time we resume (from this
2414 PC), we should step over it. */
2415 if (debug_threads)
2416 debug_printf ("Hit a gdbserver breakpoint.\n");
2417
2418 if (breakpoint_here (event_child->stop_pc))
2419 event_child->need_step_over = 1;
2420 }
2421 }
2422 else
2423 {
2424 /* We have some other signal, possibly a step-over dance was in
2425 progress, and it should be cancelled too. */
2426 step_over_finished = finish_step_over (event_child);
2427 }
2428
2429 /* We have all the data we need. Either report the event to GDB, or
2430 resume threads and keep waiting for more. */
2431
2432 /* If we're collecting a fast tracepoint, finish the collection and
2433 move out of the jump pad before delivering a signal. See
2434 linux_stabilize_threads. */
2435
2436 if (WIFSTOPPED (w)
2437 && WSTOPSIG (w) != SIGTRAP
2438 && supports_fast_tracepoints ()
2439 && agent_loaded_p ())
2440 {
2441 if (debug_threads)
2442 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2443 "to defer or adjust it.\n",
2444 WSTOPSIG (w), lwpid_of (event_child));
2445
2446 /* Allow debugging the jump pad itself. */
2447 if (current_inferior->last_resume_kind != resume_step
2448 && maybe_move_out_of_jump_pad (event_child, &w))
2449 {
2450 enqueue_one_deferred_signal (event_child, &w);
2451
2452 if (debug_threads)
2453 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2454 WSTOPSIG (w), lwpid_of (event_child));
2455
2456 linux_resume_one_lwp (event_child, 0, 0, NULL);
2457 goto retry;
2458 }
2459 }
2460
2461 if (event_child->collecting_fast_tracepoint)
2462 {
2463 if (debug_threads)
2464 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2465 "Check if we're already there.\n",
2466 lwpid_of (event_child),
2467 event_child->collecting_fast_tracepoint);
2468
2469 trace_event = 1;
2470
2471 event_child->collecting_fast_tracepoint
2472 = linux_fast_tracepoint_collecting (event_child, NULL);
2473
2474 if (event_child->collecting_fast_tracepoint != 1)
2475 {
2476 /* No longer need this breakpoint. */
2477 if (event_child->exit_jump_pad_bkpt != NULL)
2478 {
2479 if (debug_threads)
2480 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2481 "stopping all threads momentarily.\n");
2482
2483 /* Other running threads could hit this breakpoint.
2484 We don't handle moribund locations like GDB does,
2485 instead we always pause all threads when removing
2486 breakpoints, so that any step-over or
2487 decr_pc_after_break adjustment is always taken
2488 care of while the breakpoint is still
2489 inserted. */
2490 stop_all_lwps (1, event_child);
2491 cancel_breakpoints ();
2492
2493 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2494 event_child->exit_jump_pad_bkpt = NULL;
2495
2496 unstop_all_lwps (1, event_child);
2497
2498 gdb_assert (event_child->suspended >= 0);
2499 }
2500 }
2501
2502 if (event_child->collecting_fast_tracepoint == 0)
2503 {
2504 if (debug_threads)
2505 debug_printf ("fast tracepoint finished "
2506 "collecting successfully.\n");
2507
2508 /* We may have a deferred signal to report. */
2509 if (dequeue_one_deferred_signal (event_child, &w))
2510 {
2511 if (debug_threads)
2512 debug_printf ("dequeued one signal.\n");
2513 }
2514 else
2515 {
2516 if (debug_threads)
2517 debug_printf ("no deferred signals.\n");
2518
2519 if (stabilizing_threads)
2520 {
2521 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2522 ourstatus->value.sig = GDB_SIGNAL_0;
2523
2524 if (debug_threads)
2525 {
2526 debug_printf ("linux_wait_1 ret = %s, stopped "
2527 "while stabilizing threads\n",
2528 target_pid_to_str (ptid_of (event_child)));
2529 debug_exit ();
2530 }
2531
2532 return ptid_of (event_child);
2533 }
2534 }
2535 }
2536 }
2537
2538 /* Check whether GDB would be interested in this event. */
2539
2540 /* If GDB is not interested in this signal, don't stop other
2541 threads, and don't report it to GDB. Just resume the inferior
2542 right away. We do this for threading-related signals as well as
2543 any that GDB specifically requested we ignore. But never ignore
2544 SIGSTOP if we sent it ourselves, and do not ignore signals when
2545 stepping - they may require special handling to skip the signal
2546 handler. */
2547 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2548 thread library? */
2549 if (WIFSTOPPED (w)
2550 && current_inferior->last_resume_kind != resume_step
2551 && (
2552 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2553 (current_process ()->private->thread_db != NULL
2554 && (WSTOPSIG (w) == __SIGRTMIN
2555 || WSTOPSIG (w) == __SIGRTMIN + 1))
2556 ||
2557 #endif
2558 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2559 && !(WSTOPSIG (w) == SIGSTOP
2560 && current_inferior->last_resume_kind == resume_stop))))
2561 {
2562 siginfo_t info, *info_p;
2563
2564 if (debug_threads)
2565 debug_printf ("Ignored signal %d for LWP %ld.\n",
2566 WSTOPSIG (w), lwpid_of (event_child));
2567
2568 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child),
2569 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2570 info_p = &info;
2571 else
2572 info_p = NULL;
2573 linux_resume_one_lwp (event_child, event_child->stepping,
2574 WSTOPSIG (w), info_p);
2575 goto retry;
2576 }
2577
2578 /* Note that all addresses are always "out of the step range" when
2579 there's no range to begin with. */
2580 in_step_range = lwp_in_step_range (event_child);
2581
2582 /* If GDB wanted this thread to single step, and the thread is out
2583 of the step range, we always want to report the SIGTRAP, and let
2584 GDB handle it. Watchpoints should always be reported. So should
2585 signals we can't explain. A SIGTRAP we can't explain could be a
2586 GDB breakpoint --- we may or may not support Z0 breakpoints.  If
2587 we do, we'd be able to handle GDB breakpoints on top of internal
2588 breakpoints, by handling the internal breakpoint and still
2589 reporting the event to GDB.  If we don't, we're out of luck; GDB
2590 won't see the breakpoint hit. */
2591 report_to_gdb = (!maybe_internal_trap
2592 || (current_inferior->last_resume_kind == resume_step
2593 && !in_step_range)
2594 || event_child->stopped_by_watchpoint
2595 || (!step_over_finished && !in_step_range
2596 && !bp_explains_trap && !trace_event)
2597 || (gdb_breakpoint_here (event_child->stop_pc)
2598 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2599 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2600
2601 run_breakpoint_commands (event_child->stop_pc);
2602
2603 /* We found no reason GDB would want us to stop. We either hit one
2604 of our own breakpoints, or finished an internal step GDB
2605 shouldn't know about. */
2606 if (!report_to_gdb)
2607 {
2608 if (debug_threads)
2609 {
2610 if (bp_explains_trap)
2611 debug_printf ("Hit a gdbserver breakpoint.\n");
2612 if (step_over_finished)
2613 debug_printf ("Step-over finished.\n");
2614 if (trace_event)
2615 debug_printf ("Tracepoint event.\n");
2616 if (lwp_in_step_range (event_child))
2617 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2618 paddress (event_child->stop_pc),
2619 paddress (event_child->step_range_start),
2620 paddress (event_child->step_range_end));
2621 }
2622
2623 /* We're not reporting this breakpoint to GDB, so apply the
2624 decr_pc_after_break adjustment to the inferior's regcache
2625 ourselves. */
2626
2627 if (the_low_target.set_pc != NULL)
2628 {
2629 struct regcache *regcache
2630 = get_thread_regcache (get_lwp_thread (event_child), 1);
2631 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2632 }
2633
2634 /* We may have finished stepping over a breakpoint. If so,
2635 we've stopped and suspended all LWPs momentarily except the
2636 stepping one. This is where we resume them all again. We're
2637 going to keep waiting, so use proceed, which handles stepping
2638 over the next breakpoint. */
2639 if (debug_threads)
2640 debug_printf ("proceeding all threads.\n");
2641
2642 if (step_over_finished)
2643 unsuspend_all_lwps (event_child);
2644
2645 proceed_all_lwps ();
2646 goto retry;
2647 }
2648
2649 if (debug_threads)
2650 {
2651 if (current_inferior->last_resume_kind == resume_step)
2652 {
2653 if (event_child->step_range_start == event_child->step_range_end)
2654 debug_printf ("GDB wanted to single-step, reporting event.\n");
2655 else if (!lwp_in_step_range (event_child))
2656 debug_printf ("Out of step range, reporting event.\n");
2657 }
2658 if (event_child->stopped_by_watchpoint)
2659 debug_printf ("Stopped by watchpoint.\n");
2660 if (gdb_breakpoint_here (event_child->stop_pc))
2661 debug_printf ("Stopped by GDB breakpoint.\n");
2662 debug_printf ("Hit a non-gdbserver trap event.\n");
2664 }
2665
2666 /* Alright, we're going to report a stop. */
2667
2668 if (!non_stop && !stabilizing_threads)
2669 {
2670 /* In all-stop, stop all threads. */
2671 stop_all_lwps (0, NULL);
2672
2673 /* If we're not waiting for a specific LWP, choose an event LWP
2674 from among those that have had events. Giving equal priority
2675 to all LWPs that have had events helps prevent
2676 starvation. */
2677 if (ptid_equal (ptid, minus_one_ptid))
2678 {
2679 event_child->status_pending_p = 1;
2680 event_child->status_pending = w;
2681
2682 select_event_lwp (&event_child);
2683
2684 /* current_inferior and event_child must stay in sync. */
2685 current_inferior = get_lwp_thread (event_child);
2686
2687 event_child->status_pending_p = 0;
2688 w = event_child->status_pending;
2689 }
2690
2691 /* Now that we've selected our final event LWP, cancel any
2692 breakpoints in other LWPs that have hit a GDB breakpoint.
2693 See the comment in cancel_breakpoints_callback to find out
2694 why. */
2695 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2696
2697 /* If we were doing a step-over, all other threads but the stepping one
2698 had been paused in start_step_over, with their suspend counts
2699 incremented. We don't want to do a full unstop/unpause, because we're
2700 in all-stop mode (so we want threads stopped), but we still need to
2701 unsuspend the other threads, to decrement their `suspended' count
2702 back. */
2703 if (step_over_finished)
2704 unsuspend_all_lwps (event_child);
2705
2706 /* Stabilize threads (move out of jump pads). */
2707 stabilize_threads ();
2708 }
2709 else
2710 {
2711 /* If we just finished a step-over, then all threads had been
2712 momentarily paused. In all-stop, that's fine, we want
2713 threads stopped by now anyway. In non-stop, we need to
2714 re-resume threads that GDB wanted to be running. */
2715 if (step_over_finished)
2716 unstop_all_lwps (1, event_child);
2717 }
2718
2719 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2720
2721 if (current_inferior->last_resume_kind == resume_stop
2722 && WSTOPSIG (w) == SIGSTOP)
2723 {
2724 /* A thread that GDB requested to stop with vCont;t stopped
2725 cleanly, so report it as SIG0.  The use of SIGSTOP is an
2726 implementation detail. */
2727 ourstatus->value.sig = GDB_SIGNAL_0;
2728 }
2729 else if (current_inferior->last_resume_kind == resume_stop
2730 && WSTOPSIG (w) != SIGSTOP)
2731 {
2732 /* A thread that GDB requested to stop with vCont;t, but which
2733 stopped for some other reason. */
2734 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2735 }
2736 else
2737 {
2738 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2739 }
2740
2741 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2742
2743 if (debug_threads)
2744 {
2745 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2746 target_pid_to_str (ptid_of (event_child)),
2747 ourstatus->kind, ourstatus->value.sig);
2748 debug_exit ();
2749 }
2750
2751 return ptid_of (event_child);
2752 }
2753
2754 /* Get rid of any pending event in the pipe. */
2755 static void
2756 async_file_flush (void)
2757 {
2758 int ret;
2759 char buf;
2760
2761 do
2762 ret = read (linux_event_pipe[0], &buf, 1);
2763 while (ret >= 0 || (ret == -1 && errno == EINTR));
2764 }
2765
2766 /* Put something in the pipe, so the event loop wakes up. */
2767 static void
2768 async_file_mark (void)
2769 {
2770 int ret;
2771
2772 async_file_flush ();
2773
2774 do
2775 ret = write (linux_event_pipe[1], "+", 1);
2776 while (ret == 0 || (ret == -1 && errno == EINTR));
2777
2778 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2779 be awakened anyway. */
2780 }
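
/* A hedged, self-contained sketch of the self-pipe pattern the two
   helpers above rely on.  linux_event_pipe itself is created
   elsewhere; the key property is that both ends are non-blocking, so
   async_file_flush can drain until read fails with EAGAIN and
   async_file_mark can safely ignore a full pipe.  (pipe and fcntl
   come from <unistd.h> and <fcntl.h>, already included above.)  */
#if 0
static int
event_pipe_open_sketch (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;

  /* Make both ends non-blocking.  */
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif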
2781
2782 static ptid_t
2783 linux_wait (ptid_t ptid,
2784 struct target_waitstatus *ourstatus, int target_options)
2785 {
2786 ptid_t event_ptid;
2787
2788 /* Flush the async file first. */
2789 if (target_is_async_p ())
2790 async_file_flush ();
2791
2792 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2793
2794 /* If at least one stop was reported, there may be more. A single
2795 SIGCHLD can signal more than one child stop. */
2796 if (target_is_async_p ()
2797 && (target_options & TARGET_WNOHANG) != 0
2798 && !ptid_equal (event_ptid, null_ptid))
2799 async_file_mark ();
2800
2801 return event_ptid;
2802 }
2803
2804 /* Send a signal to an LWP. */
2805
2806 static int
2807 kill_lwp (unsigned long lwpid, int signo)
2808 {
2809 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2810 fails, then we are not using nptl threads and we should be using kill. */
2811
2812 #ifdef __NR_tkill
2813 {
2814 static int tkill_failed;
2815
2816 if (!tkill_failed)
2817 {
2818 int ret;
2819
2820 errno = 0;
2821 ret = syscall (__NR_tkill, lwpid, signo);
2822 if (errno != ENOSYS)
2823 return ret;
2824 tkill_failed = 1;
2825 }
2826 }
2827 #endif
2828
2829 return kill (lwpid, signo);
2830 }
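
/* For illustration only: newer kernels also provide tgkill, which
   additionally checks the thread group and so cannot race against
   LWP-id reuse the way tkill can.  A sketch, assuming __NR_tgkill is
   available on the build machine; this file does not use it.  */
#if 0
static int
tgkill_sketch (pid_t tgid, pid_t tid, int signo)
{
  return syscall (__NR_tgkill, tgid, tid, signo);
}
#endif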
2831
2832 void
2833 linux_stop_lwp (struct lwp_info *lwp)
2834 {
2835 send_sigstop (lwp);
2836 }
2837
2838 static void
2839 send_sigstop (struct lwp_info *lwp)
2840 {
2841 int pid;
2842
2843 pid = lwpid_of (lwp);
2844
2845 /* If we already have a pending stop signal for this process, don't
2846 send another. */
2847 if (lwp->stop_expected)
2848 {
2849 if (debug_threads)
2850 debug_printf ("Have pending sigstop for lwp %d\n", pid);
2851
2852 return;
2853 }
2854
2855 if (debug_threads)
2856 debug_printf ("Sending sigstop to lwp %d\n", pid);
2857
2858 lwp->stop_expected = 1;
2859 kill_lwp (pid, SIGSTOP);
2860 }
2861
2862 static int
2863 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2864 {
2865 struct lwp_info *lwp = (struct lwp_info *) entry;
2866
2867 /* Ignore EXCEPT. */
2868 if (lwp == except)
2869 return 0;
2870
2871 if (lwp->stopped)
2872 return 0;
2873
2874 send_sigstop (lwp);
2875 return 0;
2876 }
2877
2878 /* Increment the suspend count of an LWP, and stop it, if not stopped
2879 yet. */
2880 static int
2881 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2882 void *except)
2883 {
2884 struct lwp_info *lwp = (struct lwp_info *) entry;
2885
2886 /* Ignore EXCEPT. */
2887 if (lwp == except)
2888 return 0;
2889
2890 lwp->suspended++;
2891
2892 return send_sigstop_callback (entry, except);
2893 }
2894
2895 static void
2896 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2897 {
2898 /* It's dead, really. */
2899 lwp->dead = 1;
2900
2901 /* Store the exit status for later. */
2902 lwp->status_pending_p = 1;
2903 lwp->status_pending = wstat;
2904
2905 /* Prevent trying to stop it. */
2906 lwp->stopped = 1;
2907
2908 /* No further stops are expected from a dead lwp. */
2909 lwp->stop_expected = 0;
2910 }
2911
2912 static void
2913 wait_for_sigstop (struct inferior_list_entry *entry)
2914 {
2915 struct lwp_info *lwp = (struct lwp_info *) entry;
2916 struct thread_info *saved_inferior;
2917 int wstat;
2918 ptid_t saved_tid;
2919 ptid_t ptid;
2920 int pid;
2921
2922 if (lwp->stopped)
2923 {
2924 if (debug_threads)
2925 debug_printf ("wait_for_sigstop: LWP %ld already stopped\n",
2926 lwpid_of (lwp));
2927 return;
2928 }
2929
2930 saved_inferior = current_inferior;
2931 if (saved_inferior != NULL)
2932 saved_tid = saved_inferior->entry.id;
2933 else
2934 saved_tid = null_ptid; /* avoid bogus unused warning */
2935
2936 ptid = lwp->entry.id;
2937
2938 if (debug_threads)
2939 debug_printf ("wait_for_sigstop: pulling one event\n");
2940
2941 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2942
2943 /* If we stopped with a signal other than SIGSTOP, save it for
2944 later reporting and leave the SIGSTOP marked as still pending.
2945 If the process exited, just return. */
2946 if (WIFSTOPPED (wstat))
2947 {
2948 if (debug_threads)
2949 debug_printf ("LWP %ld stopped with signal %d\n",
2950 lwpid_of (lwp), WSTOPSIG (wstat));
2951
2952 if (WSTOPSIG (wstat) != SIGSTOP)
2953 {
2954 if (debug_threads)
2955 debug_printf ("LWP %ld stopped with non-sigstop status %06x\n",
2956 lwpid_of (lwp), wstat);
2957
2958 lwp->status_pending_p = 1;
2959 lwp->status_pending = wstat;
2960 }
2961 }
2962 else
2963 {
2964 if (debug_threads)
2965 debug_printf ("Process %d exited while stopping LWPs\n", pid);
2966
2967 lwp = find_lwp_pid (pid_to_ptid (pid));
2968 if (lwp)
2969 {
2970 /* Leave this status pending for the next time we're able to
2971 report it. In the mean time, we'll report this lwp as
2972 dead to GDB, so GDB doesn't try to read registers and
2973 memory from it. This can only happen if this was the
2974 last thread of the process; otherwise, PID is removed
2975 from the thread tables before linux_wait_for_event
2976 returns. */
2977 mark_lwp_dead (lwp, wstat);
2978 }
2979 }
2980
2981 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2982 current_inferior = saved_inferior;
2983 else
2984 {
2985 if (debug_threads)
2986 debug_printf ("Previously current thread died.\n");
2987
2988 if (non_stop)
2989 {
2990 /* We can't change the current inferior behind GDB's back,
2991 otherwise, a subsequent command may apply to the wrong
2992 process. */
2993 current_inferior = NULL;
2994 }
2995 else
2996 {
2997 /* Set a valid thread as current. */
2998 set_desired_inferior (0);
2999 }
3000 }
3001 }
3002
3003 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3004 move it out, because we need to report the stop event to GDB. For
3005 example, if the user puts a breakpoint in the jump pad, it's
3006 because she wants to debug it. */
3007
3008 static int
3009 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3010 {
3011 struct lwp_info *lwp = (struct lwp_info *) entry;
3012 struct thread_info *thread = get_lwp_thread (lwp);
3013
3014 gdb_assert (lwp->suspended == 0);
3015 gdb_assert (lwp->stopped);
3016
3017 /* Allow debugging the jump pad, gdb_collect, etc. */
3018 return (supports_fast_tracepoints ()
3019 && agent_loaded_p ()
3020 && (gdb_breakpoint_here (lwp->stop_pc)
3021 || lwp->stopped_by_watchpoint
3022 || thread->last_resume_kind == resume_step)
3023 && linux_fast_tracepoint_collecting (lwp, NULL));
3024 }
3025
3026 static void
3027 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3028 {
3029 struct lwp_info *lwp = (struct lwp_info *) entry;
3030 struct thread_info *thread = get_lwp_thread (lwp);
3031 int *wstat;
3032
3033 gdb_assert (lwp->suspended == 0);
3034 gdb_assert (lwp->stopped);
3035
3036 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3037
3038 /* Allow debugging the jump pad, gdb_collect, etc. */
3039 if (!gdb_breakpoint_here (lwp->stop_pc)
3040 && !lwp->stopped_by_watchpoint
3041 && thread->last_resume_kind != resume_step
3042 && maybe_move_out_of_jump_pad (lwp, wstat))
3043 {
3044 if (debug_threads)
3045 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3046 lwpid_of (lwp));
3047
3048 if (wstat)
3049 {
3050 lwp->status_pending_p = 0;
3051 enqueue_one_deferred_signal (lwp, wstat);
3052
3053 if (debug_threads)
3054 debug_printf ("Signal %d for LWP %ld deferred "
3055 "(in jump pad)\n",
3056 WSTOPSIG (*wstat), lwpid_of (lwp));
3057 }
3058
3059 linux_resume_one_lwp (lwp, 0, 0, NULL);
3060 }
3061 else
3062 lwp->suspended++;
3063 }
3064
3065 static int
3066 lwp_running (struct inferior_list_entry *entry, void *data)
3067 {
3068 struct lwp_info *lwp = (struct lwp_info *) entry;
3069
3070 if (lwp->dead)
3071 return 0;
3072 if (lwp->stopped)
3073 return 0;
3074 return 1;
3075 }
3076
3077 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3078 If SUSPEND, then also increase the suspend count of every LWP,
3079 except EXCEPT. */
3080
3081 static void
3082 stop_all_lwps (int suspend, struct lwp_info *except)
3083 {
3084 /* Should not be called recursively. */
3085 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3086
3087 if (debug_threads)
3088 {
3089 debug_enter ();
3090 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3091 suspend ? "stop-and-suspend" : "stop",
3092 except != NULL
3093 ? target_pid_to_str (ptid_of (except))
3094 : "none");
3095 }
3096
3097 stopping_threads = (suspend
3098 ? STOPPING_AND_SUSPENDING_THREADS
3099 : STOPPING_THREADS);
3100
3101 if (suspend)
3102 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3103 else
3104 find_inferior (&all_lwps, send_sigstop_callback, except);
3105 for_each_inferior (&all_lwps, wait_for_sigstop);
3106 stopping_threads = NOT_STOPPING_THREADS;
3107
3108 if (debug_threads)
3109 {
3110 debug_printf ("stop_all_lwps done, setting stopping_threads "
3111 "back to !stopping\n");
3112 debug_exit ();
3113 }
3114 }
3115
3116 /* Resume execution of the given LWP.
3117 If STEP is nonzero, single-step it.
3118 If SIGNAL is nonzero, give it that signal. */
3119
3120 static void
3121 linux_resume_one_lwp (struct lwp_info *lwp,
3122 int step, int signal, siginfo_t *info)
3123 {
3124 struct thread_info *saved_inferior;
3125 int fast_tp_collecting;
3126
3127 if (lwp->stopped == 0)
3128 return;
3129
3130 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3131
3132 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3133
3134 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3135 user used the "jump" command, or "set $pc = foo"). */
3136 if (lwp->stop_pc != get_pc (lwp))
3137 {
3138 /* Collecting 'while-stepping' actions doesn't make sense
3139 anymore. */
3140 release_while_stepping_state_list (get_lwp_thread (lwp));
3141 }
3142
3143 /* If we have pending signals or status, and a new signal, enqueue the
3144 signal. Also enqueue the signal if we are waiting to reinsert a
3145 breakpoint; it will be picked up again below. */
3146 if (signal != 0
3147 && (lwp->status_pending_p
3148 || lwp->pending_signals != NULL
3149 || lwp->bp_reinsert != 0
3150 || fast_tp_collecting))
3151 {
3152 struct pending_signals *p_sig;
3153 p_sig = xmalloc (sizeof (*p_sig));
3154 p_sig->prev = lwp->pending_signals;
3155 p_sig->signal = signal;
3156 if (info == NULL)
3157 memset (&p_sig->info, 0, sizeof (siginfo_t));
3158 else
3159 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3160 lwp->pending_signals = p_sig;
3161 }
3162
3163 if (lwp->status_pending_p)
3164 {
3165 if (debug_threads)
3166 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3167 " has pending status\n",
3168 lwpid_of (lwp), step ? "step" : "continue", signal,
3169 lwp->stop_expected ? "expected" : "not expected");
3170 return;
3171 }
3172
3173 saved_inferior = current_inferior;
3174 current_inferior = get_lwp_thread (lwp);
3175
3176 if (debug_threads)
3177 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3178 lwpid_of (lwp), step ? "step" : "continue", signal,
3179 lwp->stop_expected ? "expected" : "not expected");
3180
3181 /* This bit needs some thinking about. If we get a signal that
3182 we must report while a single-step reinsert is still pending,
3183 we often end up resuming the thread. It might be better to
3184 (ew) allow a stack of pending events; then we could be sure that
3185 the reinsert happened right away and not lose any signals.
3186
3187 Making this stack would also shrink the window in which breakpoints are
3188 uninserted (see comment in linux_wait_for_lwp) but not enough for
3189 complete correctness, so it won't solve that problem. It may be
3190 worthwhile just to solve this one, however. */
3191 if (lwp->bp_reinsert != 0)
3192 {
3193 if (debug_threads)
3194 debug_printf (" pending reinsert at 0x%s\n",
3195 paddress (lwp->bp_reinsert));
3196
3197 if (can_hardware_single_step ())
3198 {
3199 if (fast_tp_collecting == 0)
3200 {
3201 if (step == 0)
3202 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3203 if (lwp->suspended)
3204 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3205 lwp->suspended);
3206 }
3207
3208 step = 1;
3209 }
3210
3211 /* Postpone any pending signal. It was enqueued above. */
3212 signal = 0;
3213 }
3214
3215 if (fast_tp_collecting == 1)
3216 {
3217 if (debug_threads)
3218 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3219 " (exit-jump-pad-bkpt)\n",
3220 lwpid_of (lwp));
3221
3222 /* Postpone any pending signal. It was enqueued above. */
3223 signal = 0;
3224 }
3225 else if (fast_tp_collecting == 2)
3226 {
3227 if (debug_threads)
3228 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3229 " single-stepping\n",
3230 lwpid_of (lwp));
3231
3232 if (can_hardware_single_step ())
3233 step = 1;
3234 else
3235 fatal ("moving out of jump pad single-stepping"
3236 " not implemented on this target");
3237
3238 /* Postpone any pending signal. It was enqueued above. */
3239 signal = 0;
3240 }
3241
3242 /* If we have while-stepping actions in this thread, set it stepping.
3243 If we have a signal to deliver, it may or may not be set to
3244 SIG_IGN; we don't know.  Assume so, and allow collecting
3245 while-stepping into a signal handler. A possible smart thing to
3246 do would be to set an internal breakpoint at the signal return
3247 address, continue, and carry on catching this while-stepping
3248 action only when that breakpoint is hit. A future
3249 enhancement. */
3250 if (get_lwp_thread (lwp)->while_stepping != NULL
3251 && can_hardware_single_step ())
3252 {
3253 if (debug_threads)
3254 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3255 lwpid_of (lwp));
3256 step = 1;
3257 }
3258
3259 if (debug_threads && the_low_target.get_pc != NULL)
3260 {
3261 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3262 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3263 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3264 }
3265
3266 /* If we have pending signals, consume one unless we are trying to
3267 reinsert a breakpoint or we're trying to finish a fast tracepoint
3268 collect. */
3269 if (lwp->pending_signals != NULL
3270 && lwp->bp_reinsert == 0
3271 && fast_tp_collecting == 0)
3272 {
3273 struct pending_signals **p_sig;
3274
3275 p_sig = &lwp->pending_signals;
3276 while ((*p_sig)->prev != NULL)
3277 p_sig = &(*p_sig)->prev;
3278
3279 signal = (*p_sig)->signal;
3280 if ((*p_sig)->info.si_signo != 0)
3281 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3282 &(*p_sig)->info);
3283
3284 free (*p_sig);
3285 *p_sig = NULL;
3286 }
3287
3288 if (the_low_target.prepare_to_resume != NULL)
3289 the_low_target.prepare_to_resume (lwp);
3290
3291 regcache_invalidate_thread (get_lwp_thread (lwp));
3292 errno = 0;
3293 lwp->stopped = 0;
3294 lwp->stopped_by_watchpoint = 0;
3295 lwp->stepping = step;
3296 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp),
3297 (PTRACE_TYPE_ARG3) 0,
3298 /* Coerce to a uintptr_t first to avoid potential gcc warning
3299 of coercing an 8 byte integer to a 4 byte pointer. */
3300 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3301
3302 current_inferior = saved_inferior;
3303 if (errno)
3304 {
3305 /* ESRCH from ptrace either means that the thread was already
3306 running (an error) or that it is gone (a race condition). If
3307 it's gone, we will get a notification the next time we wait,
3308 so we can ignore the error. We could differentiate these
3309 two, but it's tricky without waiting; the thread still exists
3310 as a zombie, so sending it signal 0 would succeed. So just
3311 ignore ESRCH. */
3312 if (errno == ESRCH)
3313 return;
3314
3315 perror_with_name ("ptrace");
3316 }
3317 }
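
/* A minimal sketch of the pending-signal queue discipline used
   above: signals are pushed at the head of the list via the `prev'
   links, and consumed oldest-first by walking to the tail, which
   gives FIFO delivery.  Disabled; illustration only.  */
#if 0
static int
dequeue_oldest_signal_sketch (struct pending_signals **head)
{
  struct pending_signals **p = head;
  int sig;

  if (*p == NULL)
    return 0;

  /* The tail of the `prev' chain is the oldest entry.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif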
3318
3319 struct thread_resume_array
3320 {
3321 struct thread_resume *resume;
3322 size_t n;
3323 };
3324
3325 /* This function is called once per thread via find_inferior.
3326 ARG is a pointer to a thread_resume_array struct.
3327 We look up the thread specified by ENTRY in ARG, and mark the thread
3328 with a pointer to the appropriate resume request.
3329
3330 This algorithm is O(threads * resume elements), but resume elements
3331 is small (and will remain small at least until GDB supports thread
3332 suspension). */
3333
3334 static int
3335 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3336 {
3337 struct lwp_info *lwp;
3338 struct thread_info *thread;
3339 int ndx;
3340 struct thread_resume_array *r;
3341
3342 thread = (struct thread_info *) entry;
3343 lwp = get_thread_lwp (thread);
3344 r = arg;
3345
3346 for (ndx = 0; ndx < r->n; ndx++)
3347 {
3348 ptid_t ptid = r->resume[ndx].thread;
3349 if (ptid_equal (ptid, minus_one_ptid)
3350 || ptid_equal (ptid, entry->id)
3351 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3352 of PID'. */
3353 || (ptid_get_pid (ptid) == pid_of (lwp)
3354 && (ptid_is_pid (ptid)
3355 || ptid_get_lwp (ptid) == -1)))
3356 {
3357 if (r->resume[ndx].kind == resume_stop
3358 && thread->last_resume_kind == resume_stop)
3359 {
3360 if (debug_threads)
3361 debug_printf ("already %s LWP %ld at GDB's request\n",
3362 (thread->last_status.kind
3363 == TARGET_WAITKIND_STOPPED)
3364 ? "stopped"
3365 : "stopping",
3366 lwpid_of (lwp));
3367
3368 continue;
3369 }
3370
3371 lwp->resume = &r->resume[ndx];
3372 thread->last_resume_kind = lwp->resume->kind;
3373
3374 lwp->step_range_start = lwp->resume->step_range_start;
3375 lwp->step_range_end = lwp->resume->step_range_end;
3376
3377 /* If we had a deferred signal to report, dequeue one now.
3378 This can happen if LWP gets more than one signal while
3379 trying to get out of a jump pad. */
3380 if (lwp->stopped
3381 && !lwp->status_pending_p
3382 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3383 {
3384 lwp->status_pending_p = 1;
3385
3386 if (debug_threads)
3387 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3388 "leaving status pending.\n",
3389 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3390 }
3391
3392 return 0;
3393 }
3394 }
3395
3396 /* No resume action for this thread. */
3397 lwp->resume = NULL;
3398
3399 return 0;
3400 }
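
/* The ptid matching rule above, restated in isolation as a sketch: a
   resume request for minus_one_ptid applies to everything, an exact
   ptid must match exactly, and both the `pPID' and `pPID.-1' forms
   apply to every LWP of PID.  */
#if 0
static int
resume_request_matches_sketch (ptid_t request, ptid_t lwp_ptid)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, lwp_ptid)
	  || (ptid_get_pid (request) == ptid_get_pid (lwp_ptid)
	      && (ptid_is_pid (request)
		  || ptid_get_lwp (request) == -1)));
}
#endif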
3401
3402 /* find_inferior callback for linux_resume.
3403 Set *FLAG_P if this lwp has an interesting status pending. */
3404
3405 static int
3406 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3407 {
3408 struct lwp_info *lwp = (struct lwp_info *) entry;
3409
3410 /* LWPs which will not be resumed are not interesting, because
3411 we might not wait for them next time through linux_wait. */
3412 if (lwp->resume == NULL)
3413 return 0;
3414
3415 if (lwp->status_pending_p)
3416 * (int *) flag_p = 1;
3417
3418 return 0;
3419 }
3420
3421 /* Return 1 if this lwp that GDB wants running is stopped at an
3422 internal breakpoint that we need to step over. It assumes that any
3423 required STOP_PC adjustment has already been propagated to the
3424 inferior's regcache. */
3425
3426 static int
3427 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3428 {
3429 struct lwp_info *lwp = (struct lwp_info *) entry;
3430 struct thread_info *thread;
3431 struct thread_info *saved_inferior;
3432 CORE_ADDR pc;
3433
3434 /* LWPs which will not be resumed are not interesting, because we
3435 might not wait for them next time through linux_wait. */
3436
3437 if (!lwp->stopped)
3438 {
3439 if (debug_threads)
3440 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3441 lwpid_of (lwp));
3442 return 0;
3443 }
3444
3445 thread = get_lwp_thread (lwp);
3446
3447 if (thread->last_resume_kind == resume_stop)
3448 {
3449 if (debug_threads)
3450 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3451 " stopped\n",
3452 lwpid_of (lwp));
3453 return 0;
3454 }
3455
3456 gdb_assert (lwp->suspended >= 0);
3457
3458 if (lwp->suspended)
3459 {
3460 if (debug_threads)
3461 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3462 lwpid_of (lwp));
3463 return 0;
3464 }
3465
3466 if (!lwp->need_step_over)
3467 {
3468 if (debug_threads)
3469 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3470 }
3471
3472 if (lwp->status_pending_p)
3473 {
3474 if (debug_threads)
3475 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3476 " status.\n",
3477 lwpid_of (lwp));
3478 return 0;
3479 }
3480
3481 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3482 or we have. */
3483 pc = get_pc (lwp);
3484
3485 /* If the PC has changed since we stopped, then don't do anything,
3486 and let the breakpoint/tracepoint be hit. This happens if, for
3487 instance, GDB handled the decr_pc_after_break subtraction itself,
3488 GDB is OOL stepping this thread, or the user has issued a "jump"
3489 command, or poked the thread's registers herself. */
3490 if (pc != lwp->stop_pc)
3491 {
3492 if (debug_threads)
3493 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3494 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3495 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3496
3497 lwp->need_step_over = 0;
3498 return 0;
3499 }
3500
3501 saved_inferior = current_inferior;
3502 current_inferior = thread;
3503
3504 /* We can only step over breakpoints we know about. */
3505 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3506 {
3507 /* Don't step over a breakpoint that GDB expects to hit, though.
3508 If the condition is being evaluated on the target's side and it
3509 evaluates to false, step over this breakpoint as well. */
3510 if (gdb_breakpoint_here (pc)
3511 && gdb_condition_true_at_breakpoint (pc)
3512 && gdb_no_commands_at_breakpoint (pc))
3513 {
3514 if (debug_threads)
3515 debug_printf ("Need step over [LWP %ld]? yes, but found"
3516 " GDB breakpoint at 0x%s; skipping step over\n",
3517 lwpid_of (lwp), paddress (pc));
3518
3519 current_inferior = saved_inferior;
3520 return 0;
3521 }
3522 else
3523 {
3524 if (debug_threads)
3525 debug_printf ("Need step over [LWP %ld]? yes, "
3526 "found breakpoint at 0x%s\n",
3527 lwpid_of (lwp), paddress (pc));
3528
3529 /* We've found an lwp that needs stepping over --- return 1 so
3530 that find_inferior stops looking. */
3531 current_inferior = saved_inferior;
3532
3533 /* If the step over is cancelled, this is set again. */
3534 lwp->need_step_over = 0;
3535 return 1;
3536 }
3537 }
3538
3539 current_inferior = saved_inferior;
3540
3541 if (debug_threads)
3542 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3543 " at 0x%s\n",
3544 lwpid_of (lwp), paddress (pc));
3545
3546 return 0;
3547 }
3548
3549 /* Start a step-over operation on LWP.  When LWP is stopped at a
3550 breakpoint, to make progress we need to move the breakpoint out
3551 of the way.  If we let other threads run while we do that, they
3552 may pass by the breakpoint location and miss hitting it.  To
3553 avoid that, a step-over momentarily stops all threads while LWP
3554 is single-stepped with the breakpoint temporarily uninserted from
3555 the inferior.  When the single-step finishes, we reinsert the
3556 breakpoint and let all threads that are supposed to be running
3557 run again.
3558
3559 On targets that don't support hardware single-step, we don't
3560 currently support full software single-stepping. Instead, we only
3561 support stepping over the thread event breakpoint, by asking the
3562 low target where to place a reinsert breakpoint. Since this
3563 routine assumes the breakpoint being stepped over is a thread event
3564 breakpoint, it usually assumes the return address of the current
3565 function is a good enough place to set the reinsert breakpoint. */
3566
3567 static int
3568 start_step_over (struct lwp_info *lwp)
3569 {
3570 struct thread_info *saved_inferior;
3571 CORE_ADDR pc;
3572 int step;
3573
3574 if (debug_threads)
3575 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3576 lwpid_of (lwp));
3577
3578 stop_all_lwps (1, lwp);
3579 gdb_assert (lwp->suspended == 0);
3580
3581 if (debug_threads)
3582 debug_printf ("Done stopping all threads for step-over.\n");
3583
3584 /* Note, we should always reach here with an already adjusted PC,
3585 either by GDB (if we're resuming due to GDB's request), or by our
3586 caller, if we just finished handling an internal breakpoint GDB
3587 shouldn't care about. */
3588 pc = get_pc (lwp);
3589
3590 saved_inferior = current_inferior;
3591 current_inferior = get_lwp_thread (lwp);
3592
3593 lwp->bp_reinsert = pc;
3594 uninsert_breakpoints_at (pc);
3595 uninsert_fast_tracepoint_jumps_at (pc);
3596
3597 if (can_hardware_single_step ())
3598 {
3599 step = 1;
3600 }
3601 else
3602 {
3603 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3604 set_reinsert_breakpoint (raddr);
3605 step = 0;
3606 }
3607
3608 current_inferior = saved_inferior;
3609
3610 linux_resume_one_lwp (lwp, step, 0, NULL);
3611
3612 /* Require next event from this LWP. */
3613 step_over_bkpt = lwp->entry.id;
3614 return 1;
3615 }
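
/* The whole step-over dance in one linear sketch, assuming hardware
   single-step; in reality the control flow is split across
   start_step_over (above), linux_wait_1 and finish_step_over.  */
#if 0
static void
step_over_dance_sketch (struct lwp_info *lwp)
{
  CORE_ADDR pc = get_pc (lwp);

  stop_all_lwps (1, lwp);		/* Freeze every other thread.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);		/* Lift the breakpoint.  */
  linux_resume_one_lwp (lwp, 1, 0, NULL);  /* Hardware single-step.  */
  /* ... linux_wait_1 waits for the SIGTRAP from LWP ...  */
  finish_step_over (lwp);		/* Reinsert at BP_REINSERT.  */
  unsuspend_all_lwps (lwp);		/* Undo the suspend counts.  */
  unstop_all_lwps (1, lwp);		/* Let the others run again.  */
}
#endif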
3616
3617 /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
3618 start_step_over, if still there, and delete any reinsert
3619 breakpoints we've set, on targets without hardware single-step. */
3620
3621 static int
3622 finish_step_over (struct lwp_info *lwp)
3623 {
3624 if (lwp->bp_reinsert != 0)
3625 {
3626 if (debug_threads)
3627 debug_printf ("Finished step over.\n");
3628
3629 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3630 may be no breakpoint to reinsert there by now. */
3631 reinsert_breakpoints_at (lwp->bp_reinsert);
3632 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3633
3634 lwp->bp_reinsert = 0;
3635
3636 /* Delete any software-single-step reinsert breakpoints. No
3637 longer needed. We don't have to worry about other threads
3638 hitting this trap, and later not being able to explain it,
3639 because we were stepping over a breakpoint, and we hold all
3640 threads but LWP stopped while doing that. */
3641 if (!can_hardware_single_step ())
3642 delete_reinsert_breakpoints ();
3643
3644 step_over_bkpt = null_ptid;
3645 return 1;
3646 }
3647 else
3648 return 0;
3649 }
3650
3651 /* This function is called once per thread. We check the thread's resume
3652 request, which will tell us whether to resume, step, or leave the thread
3653 stopped; and what signal, if any, it should be sent.
3654
3655 For threads which we aren't explicitly told otherwise, we preserve
3656 the stepping flag; this is used for stepping over gdbserver-placed
3657 breakpoints.
3658
3659 If a pending status was set in any thread, we queue any needed
3660 signals, since we won't actually resume. We already have a pending
3661 event to report, so we don't need to preserve any step requests;
3662 they should be re-issued if necessary. */
3663
3664 static int
3665 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3666 {
3667 struct lwp_info *lwp;
3668 struct thread_info *thread;
3669 int step;
3670 int leave_all_stopped = * (int *) arg;
3671 int leave_pending;
3672
3673 thread = (struct thread_info *) entry;
3674 lwp = get_thread_lwp (thread);
3675
3676 if (lwp->resume == NULL)
3677 return 0;
3678
3679 if (lwp->resume->kind == resume_stop)
3680 {
3681 if (debug_threads)
3682 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (lwp));
3683
3684 if (!lwp->stopped)
3685 {
3686 if (debug_threads)
3687 debug_printf ("stopping LWP %ld\n", lwpid_of (lwp));
3688
3689 /* Stop the thread, and wait for the event asynchronously,
3690 through the event loop. */
3691 send_sigstop (lwp);
3692 }
3693 else
3694 {
3695 if (debug_threads)
3696 debug_printf ("already stopped LWP %ld\n",
3697 lwpid_of (lwp));
3698
3699 /* The LWP may have been stopped in an internal event that
3700 was not meant to be notified back to GDB (e.g., gdbserver
3701 breakpoint), so we should be reporting a stop event in
3702 this case too. */
3703
3704 /* If the thread already has a pending SIGSTOP, this is a
3705 no-op. Otherwise, something later will presumably resume
3706 the thread and this will cause it to cancel any pending
3707 operation, due to last_resume_kind == resume_stop. If
3708 the thread already has a pending status to report, we
3709 will still report it the next time we wait - see
3710 status_pending_p_callback. */
3711
3712 /* If we already have a pending signal to report, then
3713 there's no need to queue a SIGSTOP, as this means we're
3714 midway through moving the LWP out of the jumppad, and we
3715 will report the pending signal as soon as that is
3716 finished. */
3717 if (lwp->pending_signals_to_report == NULL)
3718 send_sigstop (lwp);
3719 }
3720
3721 /* For stop requests, we're done. */
3722 lwp->resume = NULL;
3723 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3724 return 0;
3725 }
3726
3727 /* If this thread, which is about to be resumed, has a pending status,
3728 then don't resume any threads - we can just report the pending
3729 status.  Make sure to queue any signals that would otherwise be
3730 sent.  In all-stop mode, we make this decision based on whether
3731 *any* thread has a pending status.  If there's a thread that needs
3732 the step-over-breakpoint dance, then don't resume any other thread
3733 but that particular one. */
3734 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3735
3736 if (!leave_pending)
3737 {
3738 if (debug_threads)
3739 debug_printf ("resuming LWP %ld\n", lwpid_of (lwp));
3740
3741 step = (lwp->resume->kind == resume_step);
3742 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3743 }
3744 else
3745 {
3746 if (debug_threads)
3747 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (lwp));
3748
3749 /* If we have a new signal, enqueue the signal. */
3750 if (lwp->resume->sig != 0)
3751 {
3752 struct pending_signals *p_sig;
3753 p_sig = xmalloc (sizeof (*p_sig));
3754 p_sig->prev = lwp->pending_signals;
3755 p_sig->signal = lwp->resume->sig;
3756 memset (&p_sig->info, 0, sizeof (siginfo_t));
3757
3758 /* If this is the same signal we were previously stopped by,
3759 make sure to queue its siginfo. We can ignore the return
3760 value of ptrace; if it fails, we'll skip
3761 PTRACE_SETSIGINFO. */
3762 if (WIFSTOPPED (lwp->last_status)
3763 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3764 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), (PTRACE_TYPE_ARG3) 0,
3765 &p_sig->info);
3766
3767 lwp->pending_signals = p_sig;
3768 }
3769 }
3770
3771 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3772 lwp->resume = NULL;
3773 return 0;
3774 }
3775
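/* Illustrative sketch (not part of the build): the enqueue step above
   mirrors how this file chains pending signals. The list is a simple
   LIFO threaded through the PREV field; delivery later walks it in
   reverse. Assuming the same struct pending_signals and xmalloc used
   above:

     static void
     enqueue_pending_signal_sketch (struct lwp_info *lwp, int signal)
     {
       struct pending_signals *p_sig = xmalloc (sizeof (*p_sig));

       p_sig->prev = lwp->pending_signals;
       p_sig->signal = signal;
       memset (&p_sig->info, 0, sizeof (siginfo_t));
       lwp->pending_signals = p_sig;
     }
*/
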
3776 static void
3777 linux_resume (struct thread_resume *resume_info, size_t n)
3778 {
3779 struct thread_resume_array array = { resume_info, n };
3780 struct lwp_info *need_step_over = NULL;
3781 int any_pending;
3782 int leave_all_stopped;
3783
3784 if (debug_threads)
3785 {
3786 debug_enter ();
3787 debug_printf ("linux_resume:\n");
3788 }
3789
3790 find_inferior (&all_threads, linux_set_resume_request, &array);
3791
3792 /* If there is a thread which would otherwise be resumed, which has
3793 a pending status, then don't resume any threads - we can just
3794 report the pending status. Make sure to queue any signals that
3795 would otherwise be sent. In non-stop mode, we'll apply this
3796 logic to each thread individually. We consume all pending events
3797 before considering to start a step-over (in all-stop). */
3798 any_pending = 0;
3799 if (!non_stop)
3800 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3801
3802 /* If there is a thread which would otherwise be resumed, which is
3803 stopped at a breakpoint that needs stepping over, then don't
3804 resume any threads - have it step over the breakpoint with all
3805 other threads stopped, then resume all threads again. Make sure
3806 to queue any signals that would otherwise be delivered or
3807 queued. */
3808 if (!any_pending && supports_breakpoints ())
3809 need_step_over
3810 = (struct lwp_info *) find_inferior (&all_lwps,
3811 need_step_over_p, NULL);
3812
3813 leave_all_stopped = (need_step_over != NULL || any_pending);
3814
3815 if (debug_threads)
3816 {
3817 if (need_step_over != NULL)
3818 debug_printf ("Not resuming all, need step over\n");
3819 else if (any_pending)
3820 debug_printf ("Not resuming, all-stop and found "
3821 "an LWP with pending status\n");
3822 else
3823 debug_printf ("Resuming, no pending status or step over needed\n");
3824 }
3825
3826 /* Even if we're leaving threads stopped, queue all signals we'd
3827 otherwise deliver. */
3828 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3829
3830 if (need_step_over)
3831 start_step_over (need_step_over);
3832
3833 if (debug_threads)
3834 {
3835 debug_printf ("linux_resume done\n");
3836 debug_exit ();
3837 }
3838 }
3839
3840 /* This function is called once per thread. We check the thread's
3841 last resume request, which will tell us whether to resume, step, or
3842 leave the thread stopped. Any signal the client requested to be
3843 delivered has already been enqueued at this point.
3844
3845 If any thread that GDB wants running is stopped at an internal
3846 breakpoint that needs stepping over, we start a step-over operation
3847 on that particular thread, and leave all others stopped. */
3848
3849 static int
3850 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3851 {
3852 struct lwp_info *lwp = (struct lwp_info *) entry;
3853 struct thread_info *thread;
3854 int step;
3855
3856 if (lwp == except)
3857 return 0;
3858
3859 if (debug_threads)
3860 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3861
3862 if (!lwp->stopped)
3863 {
3864 if (debug_threads)
3865 debug_printf (" LWP %ld already running\n", lwpid_of (lwp));
3866 return 0;
3867 }
3868
3869 thread = get_lwp_thread (lwp);
3870
3871 if (thread->last_resume_kind == resume_stop
3872 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3873 {
3874 if (debug_threads)
3875 debug_printf (" client wants LWP %ld to remain stopped\n",
3876 lwpid_of (lwp));
3877 return 0;
3878 }
3879
3880 if (lwp->status_pending_p)
3881 {
3882 if (debug_threads)
3883 debug_printf (" LWP %ld has pending status, leaving stopped\n",
3884 lwpid_of (lwp));
3885 return 0;
3886 }
3887
3888 gdb_assert (lwp->suspended >= 0);
3889
3890 if (lwp->suspended)
3891 {
3892 if (debug_threads)
3893 debug_printf (" LWP %ld is suspended\n", lwpid_of (lwp));
3894 return 0;
3895 }
3896
3897 if (thread->last_resume_kind == resume_stop
3898 && lwp->pending_signals_to_report == NULL
3899 && lwp->collecting_fast_tracepoint == 0)
3900 {
3901 /* We haven't reported this LWP as stopped yet (otherwise, the
3902 last_status.kind check above would catch it, and we wouldn't
3903 reach here). This LWP may have been momentarily paused by a
3904 stop_all_lwps call while handling, for example, another LWP's
3905 step-over. In that case, the pending expected SIGSTOP signal
3906 that was queued at vCont;t handling time will have already
3907 been consumed by wait_for_sigstop, and so we need to requeue
3908 another one here. Note that if the LWP already has a SIGSTOP
3909 pending, this is a no-op. */
3910
3911 if (debug_threads)
3912 debug_printf ("Client wants LWP %ld to stop. "
3913 "Making sure it has a SIGSTOP pending\n",
3914 lwpid_of (lwp));
3915
3916 send_sigstop (lwp);
3917 }
3918
3919 step = thread->last_resume_kind == resume_step;
3920 linux_resume_one_lwp (lwp, step, 0, NULL);
3921 return 0;
3922 }
3923
3924 static int
3925 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3926 {
3927 struct lwp_info *lwp = (struct lwp_info *) entry;
3928
3929 if (lwp == except)
3930 return 0;
3931
3932 lwp->suspended--;
3933 gdb_assert (lwp->suspended >= 0);
3934
3935 return proceed_one_lwp (entry, except);
3936 }
3937
3938 /* When we finish a step-over, set threads running again. If there's
3939 another thread that may need a step-over, now's the time to start
3940 it. Eventually, we'll move all threads past their breakpoints. */
3941
3942 static void
3943 proceed_all_lwps (void)
3944 {
3945 struct lwp_info *need_step_over;
3946
3947 /* If there is a thread which would otherwise be resumed, which is
3948 stopped at a breakpoint that needs stepping over, then don't
3949 resume any threads - have it step over the breakpoint with all
3950 other threads stopped, then resume all threads again. */
3951
3952 if (supports_breakpoints ())
3953 {
3954 need_step_over
3955 = (struct lwp_info *) find_inferior (&all_lwps,
3956 need_step_over_p, NULL);
3957
3958 if (need_step_over != NULL)
3959 {
3960 if (debug_threads)
3961 debug_printf ("proceed_all_lwps: found "
3962 "thread %ld needing a step-over\n",
3963 lwpid_of (need_step_over));
3964
3965 start_step_over (need_step_over);
3966 return;
3967 }
3968 }
3969
3970 if (debug_threads)
3971 debug_printf ("Proceeding, no step-over needed\n");
3972
3973 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3974 }
3975
3976 /* Stopped LWPs that the client wanted to be running, that don't have
3977 pending statuses, are set to run again, except for EXCEPT, if not
3978 NULL. This undoes a stop_all_lwps call. */
3979
3980 static void
3981 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3982 {
3983 if (debug_threads)
3984 {
3985 debug_enter ();
3986 if (except)
3987 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
3988 lwpid_of (except));
3989 else
3990 debug_printf ("unstopping all lwps\n");
3991 }
3992
3993 if (unsuspend)
3994 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3995 else
3996 find_inferior (&all_lwps, proceed_one_lwp, except);
3997
3998 if (debug_threads)
3999 {
4000 debug_printf ("unstop_all_lwps done\n");
4001 debug_exit ();
4002 }
4003 }
4004
4005
4006 #ifdef HAVE_LINUX_REGSETS
4007
4008 #define use_linux_regsets 1
4009
4010 /* Returns true if REGSET has been disabled. */
4011
4012 static int
4013 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4014 {
4015 return (info->disabled_regsets != NULL
4016 && info->disabled_regsets[regset - info->regsets]);
4017 }
4018
4019 /* Disable REGSET. */
4020
4021 static void
4022 disable_regset (struct regsets_info *info, struct regset_info *regset)
4023 {
4024 int dr_offset;
4025
4026 dr_offset = regset - info->regsets;
4027 if (info->disabled_regsets == NULL)
4028 info->disabled_regsets = xcalloc (1, info->num_regsets);
4029 info->disabled_regsets[dr_offset] = 1;
4030 }
4031
4032 static int
4033 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4034 struct regcache *regcache)
4035 {
4036 struct regset_info *regset;
4037 int saw_general_regs = 0;
4038 int pid;
4039 struct iovec iov;
4040
4041 regset = regsets_info->regsets;
4042
4043 pid = lwpid_of (get_thread_lwp (current_inferior));
4044 while (regset->size >= 0)
4045 {
4046 void *buf, *data;
4047 int nt_type, res;
4048
4049 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4050 {
4051 regset ++;
4052 continue;
4053 }
4054
4055 buf = xmalloc (regset->size);
4056
4057 nt_type = regset->nt_type;
4058 if (nt_type)
4059 {
4060 iov.iov_base = buf;
4061 iov.iov_len = regset->size;
4062 data = (void *) &iov;
4063 }
4064 else
4065 data = buf;
4066
4067 #ifndef __sparc__
4068 res = ptrace (regset->get_request, pid,
4069 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4070 #else
4071 res = ptrace (regset->get_request, pid, data, nt_type);
4072 #endif
4073 if (res < 0)
4074 {
4075 if (errno == EIO)
4076 {
4077 /* If we get EIO on a regset, do not try it again for
4078 this process mode. */
4079 disable_regset (regsets_info, regset);
4080 free (buf);
4081 continue;
4082 }
4083 else
4084 {
4085 char s[256];
4086 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4087 pid);
4088 perror (s);
4089 }
4090 }
4091 else if (regset->type == GENERAL_REGS)
4092 saw_general_regs = 1;
4093 regset->store_function (regcache, buf);
4094 regset ++;
4095 free (buf);
4096 }
4097 if (saw_general_regs)
4098 return 0;
4099 else
4100 return 1;
4101 }
4102
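/* Illustrative sketch (not compiled): for regsets that carry an NT_*
   note type, the loop above follows the kernel's PTRACE_GETREGSET
   convention of passing a struct iovec that describes the buffer,
   e.g. for the general-purpose registers:

     struct iovec iov = { .iov_base = buf, .iov_len = regset->size };
     ptrace (PTRACE_GETREGSET, pid, (void *) (long) NT_PRSTATUS, &iov);

   On success the kernel shrinks iov.iov_len to the amount written. */
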
4103 static int
4104 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4105 struct regcache *regcache)
4106 {
4107 struct regset_info *regset;
4108 int saw_general_regs = 0;
4109 int pid;
4110 struct iovec iov;
4111
4112 regset = regsets_info->regsets;
4113
4114 pid = lwpid_of (get_thread_lwp (current_inferior));
4115 while (regset->size >= 0)
4116 {
4117 void *buf, *data;
4118 int nt_type, res;
4119
4120 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4121 {
4122 regset ++;
4123 continue;
4124 }
4125
4126 buf = xmalloc (regset->size);
4127
4128 /* First fill the buffer with the current register set contents,
4129 in case there are any items in the kernel's regset that are
4130 not in gdbserver's regcache. */
4131
4132 nt_type = regset->nt_type;
4133 if (nt_type)
4134 {
4135 iov.iov_base = buf;
4136 iov.iov_len = regset->size;
4137 data = (void *) &iov;
4138 }
4139 else
4140 data = buf;
4141
4142 #ifndef __sparc__
4143 res = ptrace (regset->get_request, pid,
4144 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4145 #else
4146 res = ptrace (regset->get_request, pid, data, nt_type);
4147 #endif
4148
4149 if (res == 0)
4150 {
4151 /* Then overlay our cached registers on that. */
4152 regset->fill_function (regcache, buf);
4153
4154 /* Only now do we write the register set. */
4155 #ifndef __sparc__
4156 res = ptrace (regset->set_request, pid,
4157 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4158 #else
4159 res = ptrace (regset->set_request, pid, data, nt_type);
4160 #endif
4161 }
4162
4163 if (res < 0)
4164 {
4165 if (errno == EIO)
4166 {
4167 /* If we get EIO on a regset, do not try it again for
4168 this process mode. */
4169 disable_regset (regsets_info, regset);
4170 free (buf);
4171 continue;
4172 }
4173 else if (errno == ESRCH)
4174 {
4175 /* At this point, ESRCH should mean the process is
4176 already gone, in which case we simply ignore attempts
4177 to change its registers. See also the related
4178 comment in linux_resume_one_lwp. */
4179 free (buf);
4180 return 0;
4181 }
4182 else
4183 {
4184 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4185 }
4186 }
4187 else if (regset->type == GENERAL_REGS)
4188 saw_general_regs = 1;
4189 regset ++;
4190 free (buf);
4191 }
4192 if (saw_general_regs)
4193 return 0;
4194 else
4195 return 1;
4196 }
4197
4198 #else /* !HAVE_LINUX_REGSETS */
4199
4200 #define use_linux_regsets 0
4201 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4202 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4203
4204 #endif
4205
4206 /* Return 1 if register REGNO is supported by one of the regset ptrace
4207 calls or 0 if it has to be transferred individually. */
4208
4209 static int
4210 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4211 {
4212 unsigned char mask = 1 << (regno % 8);
4213 size_t index = regno / 8;
4214
4215 return (use_linux_regsets
4216 && (regs_info->regset_bitmap == NULL
4217 || (regs_info->regset_bitmap[index] & mask) != 0));
4218 }
4219
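/* Worked example (illustrative only): the bitmap packs one bit per
   register, eight registers per byte. Register 10 therefore lands in
   byte 10 / 8 == 1 with mask 1 << (10 % 8) == 0x04:

     const unsigned char bitmap[] = { 0x00, 0x04 };  // only reg 10 set
     int in_regsets = (bitmap[10 / 8] & (1 << (10 % 8))) != 0;  // 1
*/
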
4220 #ifdef HAVE_LINUX_USRREGS
4221
4222 int
4223 register_addr (const struct usrregs_info *usrregs, int regnum)
4224 {
4225 int addr;
4226
4227 if (regnum < 0 || regnum >= usrregs->num_regs)
4228 error ("Invalid register number %d.", regnum);
4229
4230 addr = usrregs->regmap[regnum];
4231
4232 return addr;
4233 }
4234
4235 /* Fetch one register. */
4236 static void
4237 fetch_register (const struct usrregs_info *usrregs,
4238 struct regcache *regcache, int regno)
4239 {
4240 CORE_ADDR regaddr;
4241 int i, size;
4242 char *buf;
4243 int pid;
4244
4245 if (regno >= usrregs->num_regs)
4246 return;
4247 if ((*the_low_target.cannot_fetch_register) (regno))
4248 return;
4249
4250 regaddr = register_addr (usrregs, regno);
4251 if (regaddr == -1)
4252 return;
4253
4254 size = ((register_size (regcache->tdesc, regno)
4255 + sizeof (PTRACE_XFER_TYPE) - 1)
4256 & -sizeof (PTRACE_XFER_TYPE));
4257 buf = alloca (size);
4258
4259 pid = lwpid_of (get_thread_lwp (current_inferior));
4260 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4261 {
4262 errno = 0;
4263 *(PTRACE_XFER_TYPE *) (buf + i) =
4264 ptrace (PTRACE_PEEKUSER, pid,
4265 /* Coerce to a uintptr_t first to avoid potential gcc warning
4266 of coercing an 8 byte integer to a 4 byte pointer. */
4267 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4268 regaddr += sizeof (PTRACE_XFER_TYPE);
4269 if (errno != 0)
4270 error ("reading register %d: %s", regno, strerror (errno));
4271 }
4272
4273 if (the_low_target.supply_ptrace_register)
4274 the_low_target.supply_ptrace_register (regcache, regno, buf);
4275 else
4276 supply_register (regcache, regno, buf);
4277 }
4278
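/* Worked example (illustrative only) of the size rounding used by
   fetch_register and store_register below: with an 8-byte
   PTRACE_XFER_TYPE, a 10-byte register (e.g. an x87 FP register)
   rounds up to (10 + 8 - 1) & -8 == 16 bytes, i.e. two
   PTRACE_PEEKUSER transfers. */
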
4279 /* Store one register. */
4280 static void
4281 store_register (const struct usrregs_info *usrregs,
4282 struct regcache *regcache, int regno)
4283 {
4284 CORE_ADDR regaddr;
4285 int i, size;
4286 char *buf;
4287 int pid;
4288
4289 if (regno >= usrregs->num_regs)
4290 return;
4291 if ((*the_low_target.cannot_store_register) (regno))
4292 return;
4293
4294 regaddr = register_addr (usrregs, regno);
4295 if (regaddr == -1)
4296 return;
4297
4298 size = ((register_size (regcache->tdesc, regno)
4299 + sizeof (PTRACE_XFER_TYPE) - 1)
4300 & -sizeof (PTRACE_XFER_TYPE));
4301 buf = alloca (size);
4302 memset (buf, 0, size);
4303
4304 if (the_low_target.collect_ptrace_register)
4305 the_low_target.collect_ptrace_register (regcache, regno, buf);
4306 else
4307 collect_register (regcache, regno, buf);
4308
4309 pid = lwpid_of (get_thread_lwp (current_inferior));
4310 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4311 {
4312 errno = 0;
4313 ptrace (PTRACE_POKEUSER, pid,
4314 /* Coerce to a uintptr_t first to avoid potential gcc warning
4315 about coercing an 8 byte integer to a 4 byte pointer. */
4316 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4317 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4318 if (errno != 0)
4319 {
4320 /* At this point, ESRCH should mean the process is
4321 already gone, in which case we simply ignore attempts
4322 to change its registers. See also the related
4323 comment in linux_resume_one_lwp. */
4324 if (errno == ESRCH)
4325 return;
4326
4327 if ((*the_low_target.cannot_store_register) (regno) == 0)
4328 error ("writing register %d: %s", regno, strerror (errno));
4329 }
4330 regaddr += sizeof (PTRACE_XFER_TYPE);
4331 }
4332 }
4333
4334 /* Fetch all registers, or just one, from the child process.
4335 If REGNO is -1, do this for all registers, skipping any that are
4336 assumed to have been retrieved by regsets_fetch_inferior_registers,
4337 unless ALL is non-zero.
4338 Otherwise, REGNO specifies which register (so we can save time). */
4339 static void
4340 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4341 struct regcache *regcache, int regno, int all)
4342 {
4343 struct usrregs_info *usr = regs_info->usrregs;
4344
4345 if (regno == -1)
4346 {
4347 for (regno = 0; regno < usr->num_regs; regno++)
4348 if (all || !linux_register_in_regsets (regs_info, regno))
4349 fetch_register (usr, regcache, regno);
4350 }
4351 else
4352 fetch_register (usr, regcache, regno);
4353 }
4354
4355 /* Store our register values back into the inferior.
4356 If REGNO is -1, do this for all registers, skipping any that are
4357 assumed to have been saved by regsets_store_inferior_registers,
4358 unless ALL is non-zero.
4359 Otherwise, REGNO specifies which register (so we can save time). */
4360 static void
4361 usr_store_inferior_registers (const struct regs_info *regs_info,
4362 struct regcache *regcache, int regno, int all)
4363 {
4364 struct usrregs_info *usr = regs_info->usrregs;
4365
4366 if (regno == -1)
4367 {
4368 for (regno = 0; regno < usr->num_regs; regno++)
4369 if (all || !linux_register_in_regsets (regs_info, regno))
4370 store_register (usr, regcache, regno);
4371 }
4372 else
4373 store_register (usr, regcache, regno);
4374 }
4375
4376 #else /* !HAVE_LINUX_USRREGS */
4377
4378 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4379 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4380
4381 #endif
4382
4383
4384 void
4385 linux_fetch_registers (struct regcache *regcache, int regno)
4386 {
4387 int use_regsets;
4388 int all = 0;
4389 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4390
4391 if (regno == -1)
4392 {
4393 if (the_low_target.fetch_register != NULL
4394 && regs_info->usrregs != NULL)
4395 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4396 (*the_low_target.fetch_register) (regcache, regno);
4397
4398 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4399 if (regs_info->usrregs != NULL)
4400 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4401 }
4402 else
4403 {
4404 if (the_low_target.fetch_register != NULL
4405 && (*the_low_target.fetch_register) (regcache, regno))
4406 return;
4407
4408 use_regsets = linux_register_in_regsets (regs_info, regno);
4409 if (use_regsets)
4410 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4411 regcache);
4412 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4413 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4414 }
4415 }
4416
4417 void
4418 linux_store_registers (struct regcache *regcache, int regno)
4419 {
4420 int use_regsets;
4421 int all = 0;
4422 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4423
4424 if (regno == -1)
4425 {
4426 all = regsets_store_inferior_registers (regs_info->regsets_info,
4427 regcache);
4428 if (regs_info->usrregs != NULL)
4429 usr_store_inferior_registers (regs_info, regcache, regno, all);
4430 }
4431 else
4432 {
4433 use_regsets = linux_register_in_regsets (regs_info, regno);
4434 if (use_regsets)
4435 all = regsets_store_inferior_registers (regs_info->regsets_info,
4436 regcache);
4437 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4438 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4439 }
4440 }
4441
4442
4443 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4444 to debugger memory starting at MYADDR. */
4445
4446 static int
4447 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4448 {
4449 int pid = lwpid_of (get_thread_lwp (current_inferior));
4450 register PTRACE_XFER_TYPE *buffer;
4451 register CORE_ADDR addr;
4452 register int count;
4453 char filename[64];
4454 register int i;
4455 int ret;
4456 int fd;
4457
4458 /* Try using /proc. Don't bother for one word. */
4459 if (len >= 3 * sizeof (long))
4460 {
4461 int bytes;
4462
4463 /* We could keep this file open and cache it - possibly one per
4464 thread. That requires some juggling, but is even faster. */
4465 sprintf (filename, "/proc/%d/mem", pid);
4466 fd = open (filename, O_RDONLY | O_LARGEFILE);
4467 if (fd == -1)
4468 goto no_proc;
4469
4470 /* If pread64 is available, use it. It's faster if the kernel
4471 supports it (only one syscall), and it's 64-bit safe even on
4472 32-bit platforms (for instance, SPARC debugging a SPARC64
4473 application). */
4474 #ifdef HAVE_PREAD64
4475 bytes = pread64 (fd, myaddr, len, memaddr);
4476 #else
4477 bytes = -1;
4478 if (lseek (fd, memaddr, SEEK_SET) != -1)
4479 bytes = read (fd, myaddr, len);
4480 #endif
4481
4482 close (fd);
4483 if (bytes == len)
4484 return 0;
4485
4486 /* Some data was read, we'll try to get the rest with ptrace. */
4487 if (bytes > 0)
4488 {
4489 memaddr += bytes;
4490 myaddr += bytes;
4491 len -= bytes;
4492 }
4493 }
4494
4495 no_proc:
4496 /* Round starting address down to longword boundary. */
4497 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4498 /* Round ending address up; get number of longwords that makes. */
4499 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4500 / sizeof (PTRACE_XFER_TYPE));
4501 /* Allocate buffer of that many longwords. */
4502 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4503
4504 /* Read all the longwords */
4505 errno = 0;
4506 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4507 {
4508 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4509 about coercing an 8 byte integer to a 4 byte pointer. */
4510 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4511 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4512 (PTRACE_TYPE_ARG4) 0);
4513 if (errno)
4514 break;
4515 }
4516 ret = errno;
4517
4518 /* Copy appropriate bytes out of the buffer. */
4519 if (i > 0)
4520 {
4521 i *= sizeof (PTRACE_XFER_TYPE);
4522 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4523 memcpy (myaddr,
4524 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4525 i < len ? i : len);
4526 }
4527
4528 return ret;
4529 }
4530
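/* Worked example (illustrative only) of the rounding above, assuming
   sizeof (PTRACE_XFER_TYPE) == 4: for memaddr == 0x1003 and len == 6,
   addr rounds down to 0x1000 and count becomes
   ((0x1009 - 0x1000) + 3) / 4 == 3 longwords. The final memcpy then
   skips the leading memaddr & 3 == 3 bytes of the first longword. */
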
4531 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4532 memory at MEMADDR. On failure (cannot write to the inferior)
4533 returns the value of errno. Always succeeds if LEN is zero. */
4534
4535 static int
4536 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4537 {
4538 register int i;
4539 /* Round starting address down to longword boundary. */
4540 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4541 /* Round ending address up; get number of longwords that makes. */
4542 register int count
4543 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4544 / sizeof (PTRACE_XFER_TYPE);
4545
4546 /* Allocate buffer of that many longwords. */
4547 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4548 alloca (count * sizeof (PTRACE_XFER_TYPE));
4549
4550 int pid = lwpid_of (get_thread_lwp (current_inferior));
4551
4552 if (len == 0)
4553 {
4554 /* Zero length write always succeeds. */
4555 return 0;
4556 }
4557
4558 if (debug_threads)
4559 {
4560 /* Dump up to four bytes. */
4561 unsigned int val = * (unsigned int *) myaddr;
4562 if (len == 1)
4563 val = val & 0xff;
4564 else if (len == 2)
4565 val = val & 0xffff;
4566 else if (len == 3)
4567 val = val & 0xffffff;
4568 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4569 val, (long)memaddr);
4570 }
4571
4572 /* Fill start and end extra bytes of buffer with existing memory data. */
4573
4574 errno = 0;
4575 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4576 about coercing an 8 byte integer to a 4 byte pointer. */
4577 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4578 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4579 (PTRACE_TYPE_ARG4) 0);
4580 if (errno)
4581 return errno;
4582
4583 if (count > 1)
4584 {
4585 errno = 0;
4586 buffer[count - 1]
4587 = ptrace (PTRACE_PEEKTEXT, pid,
4588 /* Coerce to a uintptr_t first to avoid potential gcc warning
4589 about coercing an 8 byte integer to a 4 byte pointer. */
4590 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4591 * sizeof (PTRACE_XFER_TYPE)),
4592 (PTRACE_TYPE_ARG4) 0);
4593 if (errno)
4594 return errno;
4595 }
4596
4597 /* Copy data to be written over corresponding part of buffer. */
4598
4599 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4600 myaddr, len);
4601
4602 /* Write the entire buffer. */
4603
4604 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4605 {
4606 errno = 0;
4607 ptrace (PTRACE_POKETEXT, pid,
4608 /* Coerce to a uintptr_t first to avoid potential gcc warning
4609 about coercing an 8 byte integer to a 4 byte pointer. */
4610 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4611 (PTRACE_TYPE_ARG4) buffer[i]);
4612 if (errno)
4613 return errno;
4614 }
4615
4616 return 0;
4617 }
4618
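/* Usage sketch (illustrative, hypothetical caller): because the
   function above rewrites whole PTRACE_XFER_TYPE words and pre-fills
   the partial first and last words with PTRACE_PEEKTEXT, unaligned
   writes of any length are safe:

     static const unsigned char trap = 0xcc;  // x86 breakpoint insn
     if (linux_write_memory (bp_addr, &trap, 1) != 0)
       perror ("linux_write_memory");
*/
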
4619 static void
4620 linux_look_up_symbols (void)
4621 {
4622 #ifdef USE_THREAD_DB
4623 struct process_info *proc = current_process ();
4624
4625 if (proc->private->thread_db != NULL)
4626 return;
4627
4628 /* If the kernel supports tracing clones, then we don't need to
4629 use the magic thread event breakpoint to learn about
4630 threads. */
4631 thread_db_init (!linux_supports_traceclone ());
4632 #endif
4633 }
4634
4635 static void
4636 linux_request_interrupt (void)
4637 {
4638 extern unsigned long signal_pid;
4639
4640 if (!ptid_equal (cont_thread, null_ptid)
4641 && !ptid_equal (cont_thread, minus_one_ptid))
4642 {
4643 struct lwp_info *lwp;
4644 int lwpid;
4645
4646 lwp = get_thread_lwp (current_inferior);
4647 lwpid = lwpid_of (lwp);
4648 kill_lwp (lwpid, SIGINT);
4649 }
4650 else
4651 kill_lwp (signal_pid, SIGINT);
4652 }
4653
4654 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4655 to debugger memory starting at MYADDR. */
4656
4657 static int
4658 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4659 {
4660 char filename[PATH_MAX];
4661 int fd, n;
4662 int pid = lwpid_of (get_thread_lwp (current_inferior));
4663
4664 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4665
4666 fd = open (filename, O_RDONLY);
4667 if (fd < 0)
4668 return -1;
4669
4670 if (offset != (CORE_ADDR) 0
4671 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4672 n = -1;
4673 else
4674 n = read (fd, myaddr, len);
4675
4676 close (fd);
4677
4678 return n;
4679 }
4680
4681 /* These breakpoint and watchpoint related wrapper functions simply
4682 pass on the function call if the target has registered a
4683 corresponding function. */
4684
4685 static int
4686 linux_insert_point (char type, CORE_ADDR addr, int len)
4687 {
4688 if (the_low_target.insert_point != NULL)
4689 return the_low_target.insert_point (type, addr, len);
4690 else
4691 /* Unsupported (see target.h). */
4692 return 1;
4693 }
4694
4695 static int
4696 linux_remove_point (char type, CORE_ADDR addr, int len)
4697 {
4698 if (the_low_target.remove_point != NULL)
4699 return the_low_target.remove_point (type, addr, len);
4700 else
4701 /* Unsupported (see target.h). */
4702 return 1;
4703 }
4704
4705 static int
4706 linux_stopped_by_watchpoint (void)
4707 {
4708 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4709
4710 return lwp->stopped_by_watchpoint;
4711 }
4712
4713 static CORE_ADDR
4714 linux_stopped_data_address (void)
4715 {
4716 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4717
4718 return lwp->stopped_data_address;
4719 }
4720
4721 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4722 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4723 && defined(PT_TEXT_END_ADDR)
4724
4725 /* This is only used for targets that define PT_TEXT_ADDR,
4726 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4727 the target has different ways of acquiring this information, like
4728 loadmaps. */
4729
4730 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4731 to tell gdb about. */
4732
4733 static int
4734 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4735 {
4736 unsigned long text, text_end, data;
4737 int pid = lwpid_of (get_thread_lwp (current_inferior));
4738
4739 errno = 0;
4740
4741 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4742 (PTRACE_TYPE_ARG4) 0);
4743 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4744 (PTRACE_TYPE_ARG4) 0);
4745 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4746 (PTRACE_TYPE_ARG4) 0);
4747
4748 if (errno == 0)
4749 {
4750 /* Both text and data offsets produced at compile-time (and so
4751 used by gdb) are relative to the beginning of the program,
4752 with the data segment immediately following the text segment.
4753 However, the actual runtime layout in memory may put the data
4754 somewhere else, so when we send gdb a data base-address, we
4755 use the real data base address and subtract the compile-time
4756 data base-address from it (which is just the length of the
4757 text segment). BSS immediately follows data in both
4758 cases. */
4759 *text_p = text;
4760 *data_p = data - (text_end - text);
4761
4762 return 1;
4763 }
4764 return 0;
4765 }
4766 #endif
4767
4768 static int
4769 linux_qxfer_osdata (const char *annex,
4770 unsigned char *readbuf, unsigned const char *writebuf,
4771 CORE_ADDR offset, int len)
4772 {
4773 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4774 }
4775
4776 /* Convert a native/host siginfo object into/from the siginfo in the
4777 layout of the inferior's architecture. */
4778
4779 static void
4780 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4781 {
4782 int done = 0;
4783
4784 if (the_low_target.siginfo_fixup != NULL)
4785 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4786
4787 /* If there was no callback, or the callback didn't do anything,
4788 then just do a straight memcpy. */
4789 if (!done)
4790 {
4791 if (direction == 1)
4792 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4793 else
4794 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4795 }
4796 }
4797
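/* Illustrative note: DIRECTION == 0 converts the native siginfo into
   the inferior's layout, DIRECTION == 1 converts back, matching the
   two calls in linux_xfer_siginfo below:

     siginfo_fixup (&siginfo, inf_siginfo, 0);  // native -> inferior
     siginfo_fixup (&siginfo, inf_siginfo, 1);  // inferior -> native
*/
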
4798 static int
4799 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4800 unsigned const char *writebuf, CORE_ADDR offset, int len)
4801 {
4802 int pid;
4803 siginfo_t siginfo;
4804 char inf_siginfo[sizeof (siginfo_t)];
4805
4806 if (current_inferior == NULL)
4807 return -1;
4808
4809 pid = lwpid_of (get_thread_lwp (current_inferior));
4810
4811 if (debug_threads)
4812 debug_printf ("%s siginfo for lwp %d.\n",
4813 readbuf != NULL ? "Reading" : "Writing",
4814 pid);
4815
4816 if (offset >= sizeof (siginfo))
4817 return -1;
4818
4819 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4820 return -1;
4821
4822 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4823 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4824 inferior with a 64-bit GDBSERVER should look the same as debugging it
4825 with a 32-bit GDBSERVER, we need to convert it. */
4826 siginfo_fixup (&siginfo, inf_siginfo, 0);
4827
4828 if (offset + len > sizeof (siginfo))
4829 len = sizeof (siginfo) - offset;
4830
4831 if (readbuf != NULL)
4832 memcpy (readbuf, inf_siginfo + offset, len);
4833 else
4834 {
4835 memcpy (inf_siginfo + offset, writebuf, len);
4836
4837 /* Convert back to ptrace layout before flushing it out. */
4838 siginfo_fixup (&siginfo, inf_siginfo, 1);
4839
4840 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4841 return -1;
4842 }
4843
4844 return len;
4845 }
4846
4847 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4848 it lets us notice when children change state; and it acts as the
4849 handler for the sigsuspend in my_waitpid. */
4850
4851 static void
4852 sigchld_handler (int signo)
4853 {
4854 int old_errno = errno;
4855
4856 if (debug_threads)
4857 {
4858 do
4859 {
4860 /* fprintf is not async-signal-safe, so call write
4861 directly. */
4862 if (write (2, "sigchld_handler\n",
4863 sizeof ("sigchld_handler\n") - 1) < 0)
4864 break; /* just ignore */
4865 } while (0);
4866 }
4867
4868 if (target_is_async_p ())
4869 async_file_mark (); /* trigger a linux_wait */
4870
4871 errno = old_errno;
4872 }
4873
4874 static int
4875 linux_supports_non_stop (void)
4876 {
4877 return 1;
4878 }
4879
4880 static int
4881 linux_async (int enable)
4882 {
4883 int previous = (linux_event_pipe[0] != -1);
4884
4885 if (debug_threads)
4886 debug_printf ("linux_async (%d), previous=%d\n",
4887 enable, previous);
4888
4889 if (previous != enable)
4890 {
4891 sigset_t mask;
4892 sigemptyset (&mask);
4893 sigaddset (&mask, SIGCHLD);
4894
4895 sigprocmask (SIG_BLOCK, &mask, NULL);
4896
4897 if (enable)
4898 {
4899 if (pipe (linux_event_pipe) == -1)
4900 fatal ("creating event pipe failed.");
4901
4902 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4903 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4904
4905 /* Register the event loop handler. */
4906 add_file_handler (linux_event_pipe[0],
4907 handle_target_event, NULL);
4908
4909 /* Always trigger a linux_wait. */
4910 async_file_mark ();
4911 }
4912 else
4913 {
4914 delete_file_handler (linux_event_pipe[0]);
4915
4916 close (linux_event_pipe[0]);
4917 close (linux_event_pipe[1]);
4918 linux_event_pipe[0] = -1;
4919 linux_event_pipe[1] = -1;
4920 }
4921
4922 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4923 }
4924
4925 return previous;
4926 }
4927
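/* Illustrative sketch (not compiled) of the self-pipe pattern wired up
   above: the SIGCHLD handler marks the event pipe, and the event loop
   wakes up on the read end and calls handle_target_event. Roughly,
   async_file_mark amounts to:

     static void
     async_file_mark_sketch (void)
     {
       char byte = '+';
       // O_NONBLOCK: if the pipe is already full, an event is
       // already pending, so a failed write is harmless.
       write (linux_event_pipe[1], &byte, 1);
     }
*/
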
4928 static int
4929 linux_start_non_stop (int nonstop)
4930 {
4931 /* Register or unregister from event-loop accordingly. */
4932 linux_async (nonstop);
4933 return 0;
4934 }
4935
4936 static int
4937 linux_supports_multi_process (void)
4938 {
4939 return 1;
4940 }
4941
4942 static int
4943 linux_supports_disable_randomization (void)
4944 {
4945 #ifdef HAVE_PERSONALITY
4946 return 1;
4947 #else
4948 return 0;
4949 #endif
4950 }
4951
4952 static int
4953 linux_supports_agent (void)
4954 {
4955 return 1;
4956 }
4957
4958 static int
4959 linux_supports_range_stepping (void)
4960 {
4961 if (*the_low_target.supports_range_stepping == NULL)
4962 return 0;
4963
4964 return (*the_low_target.supports_range_stepping) ();
4965 }
4966
4967 /* Enumerate spufs IDs for process PID. */
4968 static int
4969 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4970 {
4971 int pos = 0;
4972 int written = 0;
4973 char path[128];
4974 DIR *dir;
4975 struct dirent *entry;
4976
4977 sprintf (path, "/proc/%ld/fd", pid);
4978 dir = opendir (path);
4979 if (!dir)
4980 return -1;
4981
4982 rewinddir (dir);
4983 while ((entry = readdir (dir)) != NULL)
4984 {
4985 struct stat st;
4986 struct statfs stfs;
4987 int fd;
4988
4989 fd = atoi (entry->d_name);
4990 if (!fd)
4991 continue;
4992
4993 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4994 if (stat (path, &st) != 0)
4995 continue;
4996 if (!S_ISDIR (st.st_mode))
4997 continue;
4998
4999 if (statfs (path, &stfs) != 0)
5000 continue;
5001 if (stfs.f_type != SPUFS_MAGIC)
5002 continue;
5003
5004 if (pos >= offset && pos + 4 <= offset + len)
5005 {
5006 *(unsigned int *)(buf + pos - offset) = fd;
5007 written += 4;
5008 }
5009 pos += 4;
5010 }
5011
5012 closedir (dir);
5013 return written;
5014 }
5015
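/* Worked example (illustrative only) of the windowing above: with
   offset == 4 and len == 8 the window covers pos 4..11, so only the
   second (pos == 4) and third (pos == 8) SPU context fds found are
   copied out, and WRITTEN comes back as 8. */
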
5016 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5017 object type, using the /proc file system. */
5018 static int
5019 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5020 unsigned const char *writebuf,
5021 CORE_ADDR offset, int len)
5022 {
5023 long pid = lwpid_of (get_thread_lwp (current_inferior));
5024 char buf[128];
5025 int fd = 0;
5026 int ret = 0;
5027
5028 if (!writebuf && !readbuf)
5029 return -1;
5030
5031 if (!*annex)
5032 {
5033 if (!readbuf)
5034 return -1;
5035 else
5036 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5037 }
5038
5039 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5040 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5041 if (fd <= 0)
5042 return -1;
5043
5044 if (offset != 0
5045 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5046 {
5047 close (fd);
5048 return 0;
5049 }
5050
5051 if (writebuf)
5052 ret = write (fd, writebuf, (size_t) len);
5053 else
5054 ret = read (fd, readbuf, (size_t) len);
5055
5056 close (fd);
5057 return ret;
5058 }
5059
5060 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5061 struct target_loadseg
5062 {
5063 /* Core address to which the segment is mapped. */
5064 Elf32_Addr addr;
5065 /* VMA recorded in the program header. */
5066 Elf32_Addr p_vaddr;
5067 /* Size of this segment in memory. */
5068 Elf32_Word p_memsz;
5069 };
5070
5071 # if defined PT_GETDSBT
5072 struct target_loadmap
5073 {
5074 /* Protocol version number, must be zero. */
5075 Elf32_Word version;
5076 /* Pointer to the DSBT table, its size, and the DSBT index. */
5077 unsigned *dsbt_table;
5078 unsigned dsbt_size, dsbt_index;
5079 /* Number of segments in this map. */
5080 Elf32_Word nsegs;
5081 /* The actual memory map. */
5082 struct target_loadseg segs[/*nsegs*/];
5083 };
5084 # define LINUX_LOADMAP PT_GETDSBT
5085 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5086 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5087 # else
5088 struct target_loadmap
5089 {
5090 /* Protocol version number, must be zero. */
5091 Elf32_Half version;
5092 /* Number of segments in this map. */
5093 Elf32_Half nsegs;
5094 /* The actual memory map. */
5095 struct target_loadseg segs[/*nsegs*/];
5096 };
5097 # define LINUX_LOADMAP PTRACE_GETFDPIC
5098 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5099 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5100 # endif
5101
5102 static int
5103 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5104 unsigned char *myaddr, unsigned int len)
5105 {
5106 int pid = lwpid_of (get_thread_lwp (current_inferior));
5107 int addr = -1;
5108 struct target_loadmap *data = NULL;
5109 unsigned int actual_length, copy_length;
5110
5111 if (strcmp (annex, "exec") == 0)
5112 addr = (int) LINUX_LOADMAP_EXEC;
5113 else if (strcmp (annex, "interp") == 0)
5114 addr = (int) LINUX_LOADMAP_INTERP;
5115 else
5116 return -1;
5117
5118 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5119 return -1;
5120
5121 if (data == NULL)
5122 return -1;
5123
5124 actual_length = sizeof (struct target_loadmap)
5125 + sizeof (struct target_loadseg) * data->nsegs;
5126
5127 if (offset < 0 || offset > actual_length)
5128 return -1;
5129
5130 copy_length = actual_length - offset < len ? actual_length - offset : len;
5131 memcpy (myaddr, (char *) data + offset, copy_length);
5132 return copy_length;
5133 }
5134 #else
5135 # define linux_read_loadmap NULL
5136 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5137
5138 static void
5139 linux_process_qsupported (const char *query)
5140 {
5141 if (the_low_target.process_qsupported != NULL)
5142 the_low_target.process_qsupported (query);
5143 }
5144
5145 static int
5146 linux_supports_tracepoints (void)
5147 {
5148 if (*the_low_target.supports_tracepoints == NULL)
5149 return 0;
5150
5151 return (*the_low_target.supports_tracepoints) ();
5152 }
5153
5154 static CORE_ADDR
5155 linux_read_pc (struct regcache *regcache)
5156 {
5157 if (the_low_target.get_pc == NULL)
5158 return 0;
5159
5160 return (*the_low_target.get_pc) (regcache);
5161 }
5162
5163 static void
5164 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5165 {
5166 gdb_assert (the_low_target.set_pc != NULL);
5167
5168 (*the_low_target.set_pc) (regcache, pc);
5169 }
5170
5171 static int
5172 linux_thread_stopped (struct thread_info *thread)
5173 {
5174 return get_thread_lwp (thread)->stopped;
5175 }
5176
5177 /* This exposes stop-all-threads functionality to other modules. */
5178
5179 static void
5180 linux_pause_all (int freeze)
5181 {
5182 stop_all_lwps (freeze, NULL);
5183 }
5184
5185 /* This exposes unstop-all-threads functionality to other gdbserver
5186 modules. */
5187
5188 static void
5189 linux_unpause_all (int unfreeze)
5190 {
5191 unstop_all_lwps (unfreeze, NULL);
5192 }
5193
5194 static int
5195 linux_prepare_to_access_memory (void)
5196 {
5197 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5198 running LWP. */
5199 if (non_stop)
5200 linux_pause_all (1);
5201 return 0;
5202 }
5203
5204 static void
5205 linux_done_accessing_memory (void)
5206 {
5207 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5208 running LWP. */
5209 if (non_stop)
5210 linux_unpause_all (1);
5211 }
5212
5213 static int
5214 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5215 CORE_ADDR collector,
5216 CORE_ADDR lockaddr,
5217 ULONGEST orig_size,
5218 CORE_ADDR *jump_entry,
5219 CORE_ADDR *trampoline,
5220 ULONGEST *trampoline_size,
5221 unsigned char *jjump_pad_insn,
5222 ULONGEST *jjump_pad_insn_size,
5223 CORE_ADDR *adjusted_insn_addr,
5224 CORE_ADDR *adjusted_insn_addr_end,
5225 char *err)
5226 {
5227 return (*the_low_target.install_fast_tracepoint_jump_pad)
5228 (tpoint, tpaddr, collector, lockaddr, orig_size,
5229 jump_entry, trampoline, trampoline_size,
5230 jjump_pad_insn, jjump_pad_insn_size,
5231 adjusted_insn_addr, adjusted_insn_addr_end,
5232 err);
5233 }
5234
5235 static struct emit_ops *
5236 linux_emit_ops (void)
5237 {
5238 if (the_low_target.emit_ops != NULL)
5239 return (*the_low_target.emit_ops) ();
5240 else
5241 return NULL;
5242 }
5243
5244 static int
5245 linux_get_min_fast_tracepoint_insn_len (void)
5246 {
5247 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5248 }
5249
5250 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5251
5252 static int
5253 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5254 CORE_ADDR *phdr_memaddr, int *num_phdr)
5255 {
5256 char filename[PATH_MAX];
5257 int fd;
5258 const int auxv_size = is_elf64
5259 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5260 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5261
5262 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5263
5264 fd = open (filename, O_RDONLY);
5265 if (fd < 0)
5266 return 1;
5267
5268 *phdr_memaddr = 0;
5269 *num_phdr = 0;
5270 while (read (fd, buf, auxv_size) == auxv_size
5271 && (*phdr_memaddr == 0 || *num_phdr == 0))
5272 {
5273 if (is_elf64)
5274 {
5275 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5276
5277 switch (aux->a_type)
5278 {
5279 case AT_PHDR:
5280 *phdr_memaddr = aux->a_un.a_val;
5281 break;
5282 case AT_PHNUM:
5283 *num_phdr = aux->a_un.a_val;
5284 break;
5285 }
5286 }
5287 else
5288 {
5289 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5290
5291 switch (aux->a_type)
5292 {
5293 case AT_PHDR:
5294 *phdr_memaddr = aux->a_un.a_val;
5295 break;
5296 case AT_PHNUM:
5297 *num_phdr = aux->a_un.a_val;
5298 break;
5299 }
5300 }
5301 }
5302
5303 close (fd);
5304
5305 if (*phdr_memaddr == 0 || *num_phdr == 0)
5306 {
5307 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5308 "phdr_memaddr = %ld, phdr_num = %d",
5309 (long) *phdr_memaddr, *num_phdr);
5310 return 2;
5311 }
5312
5313 return 0;
5314 }
5315
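/* Illustrative note: /proc/PID/auxv is a flat array of (type, value)
   records, so for a 64-bit inferior each iteration above consumes one
   16-byte Elf64_auxv_t. A hypothetical entry:

     Elf64_auxv_t aux = { .a_type = AT_PHDR,
                          .a_un = { .a_val = 0x400040 } };
*/
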
5316 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5317
5318 static CORE_ADDR
5319 get_dynamic (const int pid, const int is_elf64)
5320 {
5321 CORE_ADDR phdr_memaddr, relocation;
5322 int num_phdr, i;
5323 unsigned char *phdr_buf;
5324 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5325
5326 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5327 return 0;
5328
5329 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5330 phdr_buf = alloca (num_phdr * phdr_size);
5331
5332 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5333 return 0;
5334
5335 /* Compute relocation: it is expected to be 0 for "regular" executables,
5336 non-zero for PIE ones. */
5337 relocation = -1;
5338 for (i = 0; relocation == -1 && i < num_phdr; i++)
5339 if (is_elf64)
5340 {
5341 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5342
5343 if (p->p_type == PT_PHDR)
5344 relocation = phdr_memaddr - p->p_vaddr;
5345 }
5346 else
5347 {
5348 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5349
5350 if (p->p_type == PT_PHDR)
5351 relocation = phdr_memaddr - p->p_vaddr;
5352 }
5353
5354 if (relocation == -1)
5355 {
5356 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5357 real-world executables, including PIE executables, always have
5358 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5359 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5360 provides DT_DEBUG anyway (fpc binaries are statically linked).
5361
5362 Therefore, if DT_DEBUG exists, PT_PHDR is always present too.
5363
5364 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
5365
5366 return 0;
5367 }
5368
5369 for (i = 0; i < num_phdr; i++)
5370 {
5371 if (is_elf64)
5372 {
5373 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5374
5375 if (p->p_type == PT_DYNAMIC)
5376 return p->p_vaddr + relocation;
5377 }
5378 else
5379 {
5380 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5381
5382 if (p->p_type == PT_DYNAMIC)
5383 return p->p_vaddr + relocation;
5384 }
5385 }
5386
5387 return 0;
5388 }
5389
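/* Worked example (illustrative only): for a PIE whose PT_PHDR records
   p_vaddr == 0x40 but whose headers the kernel mapped at
   AT_PHDR == 0x555555554040, the relocation computed above is
   0x555555554000, and the PT_DYNAMIC p_vaddr is shifted by that
   amount before being returned. */
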
5390 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5391 can be 0 if the inferior does not yet have the library list initialized.
5392 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5393 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5394
5395 static CORE_ADDR
5396 get_r_debug (const int pid, const int is_elf64)
5397 {
5398 CORE_ADDR dynamic_memaddr;
5399 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5400 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5401 CORE_ADDR map = -1;
5402
5403 dynamic_memaddr = get_dynamic (pid, is_elf64);
5404 if (dynamic_memaddr == 0)
5405 return map;
5406
5407 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5408 {
5409 if (is_elf64)
5410 {
5411 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5412 #ifdef DT_MIPS_RLD_MAP
5413 union
5414 {
5415 Elf64_Xword map;
5416 unsigned char buf[sizeof (Elf64_Xword)];
5417 }
5418 rld_map;
5419
5420 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5421 {
5422 if (linux_read_memory (dyn->d_un.d_val,
5423 rld_map.buf, sizeof (rld_map.buf)) == 0)
5424 return rld_map.map;
5425 else
5426 break;
5427 }
5428 #endif /* DT_MIPS_RLD_MAP */
5429
5430 if (dyn->d_tag == DT_DEBUG && map == -1)
5431 map = dyn->d_un.d_val;
5432
5433 if (dyn->d_tag == DT_NULL)
5434 break;
5435 }
5436 else
5437 {
5438 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5439 #ifdef DT_MIPS_RLD_MAP
5440 union
5441 {
5442 Elf32_Word map;
5443 unsigned char buf[sizeof (Elf32_Word)];
5444 }
5445 rld_map;
5446
5447 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5448 {
5449 if (linux_read_memory (dyn->d_un.d_val,
5450 rld_map.buf, sizeof (rld_map.buf)) == 0)
5451 return rld_map.map;
5452 else
5453 break;
5454 }
5455 #endif /* DT_MIPS_RLD_MAP */
5456
5457 if (dyn->d_tag == DT_DEBUG && map == -1)
5458 map = dyn->d_un.d_val;
5459
5460 if (dyn->d_tag == DT_NULL)
5461 break;
5462 }
5463
5464 dynamic_memaddr += dyn_size;
5465 }
5466
5467 return map;
5468 }
5469
5470 /* Read one pointer from MEMADDR in the inferior. */
5471
5472 static int
5473 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5474 {
5475 int ret;
5476
5477 /* Go through a union so this works on either big or little endian
5478 hosts, when the inferior's pointer size is smaller than the size
5479 of CORE_ADDR. It is assumed that the inferior's endianness is the
5480 same as the superior's. */
5481 union
5482 {
5483 CORE_ADDR core_addr;
5484 unsigned int ui;
5485 unsigned char uc;
5486 } addr;
5487
5488 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5489 if (ret == 0)
5490 {
5491 if (ptr_size == sizeof (CORE_ADDR))
5492 *ptr = addr.core_addr;
5493 else if (ptr_size == sizeof (unsigned int))
5494 *ptr = addr.ui;
5495 else
5496 gdb_assert_not_reached ("unhandled pointer size");
5497 }
5498 return ret;
5499 }
5500
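/* Usage sketch (illustrative only): with a 32-bit inferior under a
   64-bit gdbserver, ptr_size is 4, so the bytes land in addr.ui and
   are zero-extended into CORE_ADDR without byte swapping (host and
   inferior are assumed to share endianness):

     CORE_ADDR next;
     if (read_one_ptr (lm_addr + lmo->l_next_offset, &next, 4) == 0)
       lm_addr = next;
*/
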
5501 struct link_map_offsets
5502 {
5503 /* Offset and size of r_debug.r_version. */
5504 int r_version_offset;
5505
5506 /* Offset and size of r_debug.r_map. */
5507 int r_map_offset;
5508
5509 /* Offset to l_addr field in struct link_map. */
5510 int l_addr_offset;
5511
5512 /* Offset to l_name field in struct link_map. */
5513 int l_name_offset;
5514
5515 /* Offset to l_ld field in struct link_map. */
5516 int l_ld_offset;
5517
5518 /* Offset to l_next field in struct link_map. */
5519 int l_next_offset;
5520
5521 /* Offset to l_prev field in struct link_map. */
5522 int l_prev_offset;
5523 };
5524
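/* For reference (illustrative; based on glibc's <link.h>): the offsets
   above correspond to

     struct r_debug { int r_version; struct link_map *r_map; ... };
     struct link_map { ElfW(Addr) l_addr; char *l_name;
                       ElfW(Dyn) *l_ld;
                       struct link_map *l_next, *l_prev; };

   which yields the 4-byte and 8-byte strides encoded in the 32-bit and
   64-bit tables below. */
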
5525 /* Construct qXfer:libraries-svr4:read reply. */
5526
5527 static int
5528 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5529 unsigned const char *writebuf,
5530 CORE_ADDR offset, int len)
5531 {
5532 char *document;
5533 unsigned document_len;
5534 struct process_info_private *const priv = current_process ()->private;
5535 char filename[PATH_MAX];
5536 int pid, is_elf64;
5537
5538 static const struct link_map_offsets lmo_32bit_offsets =
5539 {
5540 0, /* r_version offset. */
5541 4, /* r_debug.r_map offset. */
5542 0, /* l_addr offset in link_map. */
5543 4, /* l_name offset in link_map. */
5544 8, /* l_ld offset in link_map. */
5545 12, /* l_next offset in link_map. */
5546 16 /* l_prev offset in link_map. */
5547 };
5548
5549 static const struct link_map_offsets lmo_64bit_offsets =
5550 {
5551 0, /* r_version offset. */
5552 8, /* r_debug.r_map offset. */
5553 0, /* l_addr offset in link_map. */
5554 8, /* l_name offset in link_map. */
5555 16, /* l_ld offset in link_map. */
5556 24, /* l_next offset in link_map. */
5557 32 /* l_prev offset in link_map. */
5558 };
5559 const struct link_map_offsets *lmo;
5560 unsigned int machine;
5561 int ptr_size;
5562 CORE_ADDR lm_addr = 0, lm_prev = 0;
5563 int allocated = 1024;
5564 char *p;
5565 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5566 int header_done = 0;
5567
5568 if (writebuf != NULL)
5569 return -2;
5570 if (readbuf == NULL)
5571 return -1;
5572
5573 pid = lwpid_of (get_thread_lwp (current_inferior));
5574 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5575 is_elf64 = elf_64_file_p (filename, &machine);
5576 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5577 ptr_size = is_elf64 ? 8 : 4;
5578
5579 while (annex[0] != '\0')
5580 {
5581 const char *sep;
5582 CORE_ADDR *addrp;
5583 int len;
5584
5585 sep = strchr (annex, '=');
5586 if (sep == NULL)
5587 break;
5588
5589 len = sep - annex;
5590 if (len == 5 && strncmp (annex, "start", 5) == 0)
5591 addrp = &lm_addr;
5592 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5593 addrp = &lm_prev;
5594 else
5595 {
5596 annex = strchr (sep, ';');
5597 if (annex == NULL)
5598 break;
5599 annex++;
5600 continue;
5601 }
5602
5603 annex = decode_address_to_semicolon (addrp, sep + 1);
5604 }
5605
5606 if (lm_addr == 0)
5607 {
5608 int r_version = 0;
5609
5610 if (priv->r_debug == 0)
5611 priv->r_debug = get_r_debug (pid, is_elf64);
5612
5613 /* We failed to find DT_DEBUG. This situation will not change
5614 for this inferior, so do not retry. Report it to GDB as
5615 E01; see GDB's solib-svr4.c for the reasons. */
5616 if (priv->r_debug == (CORE_ADDR) -1)
5617 return -1;
5618
5619 if (priv->r_debug != 0)
5620 {
5621 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5622 (unsigned char *) &r_version,
5623 sizeof (r_version)) != 0
5624 || r_version != 1)
5625 {
5626 warning ("unexpected r_debug version %d", r_version);
5627 }
5628 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5629 &lm_addr, ptr_size) != 0)
5630 {
5631 warning ("unable to read r_map from 0x%lx",
5632 (long) priv->r_debug + lmo->r_map_offset);
5633 }
5634 }
5635 }
5636
5637 document = xmalloc (allocated);
5638 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5639 p = document + strlen (document);
5640
5641 while (lm_addr
5642 && read_one_ptr (lm_addr + lmo->l_name_offset,
5643 &l_name, ptr_size) == 0
5644 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5645 &l_addr, ptr_size) == 0
5646 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5647 &l_ld, ptr_size) == 0
5648 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5649 &l_prev, ptr_size) == 0
5650 && read_one_ptr (lm_addr + lmo->l_next_offset,
5651 &l_next, ptr_size) == 0)
5652 {
5653 unsigned char libname[PATH_MAX];
5654
5655 if (lm_prev != l_prev)
5656 {
5657 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5658 (long) lm_prev, (long) l_prev);
5659 break;
5660 }
5661
5662 /* Ignore the first entry even if it has a valid name, as the first
5663 entry corresponds to the main executable. The first entry should
5664 not be skipped if the dynamic loader was loaded late by a static
5665 executable (see solib-svr4.c parameter ignore_first). But in that
5666 case the main executable does not have PT_DYNAMIC present, and this
5667 function has already exited above due to a failed get_r_debug. */
5668 if (lm_prev == 0)
5669 {
5670 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5671 p = p + strlen (p);
5672 }
5673 else
5674 {
5675 /* Not checking for error because reading may stop before
5676 we've got PATH_MAX worth of characters. */
5677 libname[0] = '\0';
5678 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5679 libname[sizeof (libname) - 1] = '\0';
5680 if (libname[0] != '\0')
5681 {
5682 /* 6x the size for xml_escape_text below. */
5683 size_t len = 6 * strlen ((char *) libname);
5684 char *name;
5685
5686 if (!header_done)
5687 {
5688 /* Terminate `<library-list-svr4'. */
5689 *p++ = '>';
5690 header_done = 1;
5691 }
5692
5693 while (allocated < p - document + len + 200)
5694 {
5695 /* Expand to guarantee sufficient storage. */
5696 uintptr_t document_len = p - document;
5697
5698 document = xrealloc (document, 2 * allocated);
5699 allocated *= 2;
5700 p = document + document_len;
5701 }
5702
5703 name = xml_escape_text ((char *) libname);
5704 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5705 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5706 name, (unsigned long) lm_addr,
5707 (unsigned long) l_addr, (unsigned long) l_ld);
5708 free (name);
5709 }
5710 }
5711
5712 lm_prev = lm_addr;
5713 lm_addr = l_next;
5714 }
5715
5716 if (!header_done)
5717 {
5718 /* Empty list; terminate `<library-list-svr4'. */
5719 strcpy (p, "/>");
5720 }
5721 else
5722 strcpy (p, "</library-list-svr4>");
5723
5724 document_len = strlen (document);
5725 if (offset < document_len)
5726 document_len -= offset;
5727 else
5728 document_len = 0;
5729 if (len > document_len)
5730 len = document_len;
5731
5732 memcpy (readbuf, document + offset, len);
5733 xfree (document);
5734
5735 return len;
5736 }
5737
5738 #ifdef HAVE_LINUX_BTRACE
5739
5740 /* See to_enable_btrace target method. */
5741
5742 static struct btrace_target_info *
5743 linux_low_enable_btrace (ptid_t ptid)
5744 {
5745 struct btrace_target_info *tinfo;
5746
5747 tinfo = linux_enable_btrace (ptid);
5748
5749 if (tinfo != NULL)
5750 {
5751 struct thread_info *thread = find_thread_ptid (ptid);
5752 struct regcache *regcache = get_thread_regcache (thread, 0);
5753
      tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
    }

  return tinfo;
}

/* See to_disable_btrace target method.  */

static int
linux_low_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* See to_read_btrace target method.  */

static int
linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
		       int type)
{
  VEC (btrace_block_s) *btrace;
  struct btrace_block *block;
  enum btrace_error err;
  int i;

  btrace = NULL;
  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

  for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
    buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
		       paddress (block->begin), paddress (block->end));

  buffer_grow_str0 (buffer, "</btrace>\n");

  VEC_free (btrace_block_s, btrace);

  return 0;
}
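
/* For reference, a two-block trace produced by the code above would
   serialize to something like (addresses hypothetical):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400500" end="0x400520"/>
   <block begin="0x400530" end="0x400560"/>
   </btrace>  */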
#endif /* HAVE_LINUX_BTRACE */

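/* The target_ops vector for Linux.  Entries appear in the order the
   callbacks are declared in struct target_ops (see target.h); slots
   a particular build does not support are filled with NULL.  */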
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	      \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_low_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
#else
  NULL,
  NULL,
  NULL,
  NULL,
#endif
  linux_supports_range_stepping,
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}

#ifdef HAVE_LINUX_REGSETS
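/* Count the regsets in INFO.  The target's regsets array is expected
   to be terminated by a sentinel entry whose size field is negative
   (conventionally -1).  */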
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_ptrace_init_warnings ();

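  /* Install the SIGCHLD handler.  SA_RESTART makes slow system calls
     interrupted by the signal restart automatically instead of
     failing with EINTR.  */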
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();
}