/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
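
/* For illustration: W_STOPCODE composes a wait status that the
   standard <sys/wait.h> macros decode, e.g. W_STOPCODE (SIGTRAP)
   == (5 << 8 | 0x7f) == 0x57f, so WIFSTOPPED (W_STOPCODE (sig)) is
   true and WSTOPSIG (W_STOPCODE (sig)) == sig.  */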

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;   /* Entry type */
  union
    {
      uint32_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;   /* Entry type */
  union
    {
      uint64_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
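
/* Illustrative sketch (not used by gdbserver): auxv entries of these
   types are typically consumed by reading /proc/PID/auxv in
   fixed-size records and scanning for a tag; the PID and the AT_PHDR
   tag below are just examples.

     Elf64_auxv_t entry;
     int fd = open ("/proc/1234/auxv", O_RDONLY);

     while (read (fd, &entry, sizeof (entry)) == sizeof (entry))
       if (entry.a_type == AT_PHDR)
	 printf ("AT_PHDR = 0x%llx\n",
		 (unsigned long long) entry.a_un.a_val);
     close (fd);
*/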

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
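
/* Typical flow (illustrative): when waitpid reports a stop for a PID
   we don't know about yet, record it with

     add_to_pid_list (&stopped_pids, pid, status);

   and when a later fork/clone event names that PID, claim it with
   pull_pid_from_list (&stopped_pids, pid, &status), which returns 1
   and unlinks the entry if it was present.  handle_extended_wait
   below is the real consumer of this list.  */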

enum stopping_threads_kind
{
  /* Not stopping threads presently.  */
  NOT_STOPPING_THREADS,

  /* Stopping threads.  */
  STOPPING_THREADS,

  /* Stopping and suspending threads.  */
  STOPPING_AND_SUSPENDING_THREADS
};

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is ELF but not
   64-bit, and -1 if it is not an ELF file at all.  Set *MACHINE to
   the e_machine field, or to EM_NONE for non-ELF headers.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID.  Return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
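
/* Example of a (hypothetical) caller:

     unsigned int machine;

     if (linux_pid_exe_is_elf_64_file (pid, &machine) == 1)
       use 64-bit layouts; machine holds the ELF e_machine value

   A return of 0 means the executable is not 64-bit ELF, and -1 means
   /proc/PID/exe was not readable or was not ELF.  */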

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 0 so as
   not to report the trap to higher layers).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;
	  child_lwp->must_set_ptrace_flags = 1;

	  /* Save fork info in the parent thread.  */
	  event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  event_lwp->waitstatus.value.related_pid = ptid;
	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
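
/* Concrete example (x86, where decr_pc_after_break == 1 and the int3
   breakpoint instruction is one byte): if a breakpoint is planted at
   0x1000, the SIGTRAP from hitting it reports $eip == 0x1001, and
   check_stopped_by_breakpoint below rewinds the PC to
   sw_breakpoint_pc == 0x1001 - 1 == 0x1000.  */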

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things apparently
	work because enough time passes between our PTRACE_ATTACH and
	GDB's next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
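
/* For example (illustrative): if a thread last stopped with SIGUSR1
   and GDB's pass/nopass table allows that signal, get_detach_signal
   returns SIGUSR1 and linux_detach_one_lwp below hands it to
   ptrace (PTRACE_DETACH, ..., SIGUSR1), so the signal is redelivered
   rather than lost.  */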

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
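
/* Illustrative use of iterate_over_lwps (the callback name is
   hypothetical):

     static int
     count_stopped_cb (struct lwp_info *lwp, void *data)
     {
       if (lwp->stopped)
	 ++*(int *) data;
       return 0;   zero means keep iterating
     }

     int n = 0;
     iterate_over_lwps (minus_one_ptid, count_stopped_cb, &n);

   Returning nonzero from the callback stops the walk and makes
   iterate_over_lwps return that LWP.  */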

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */
1726
1727 static int
1728 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1729 {
1730 struct thread_info *saved_thread;
1731
1732 saved_thread = current_thread;
1733 current_thread = get_lwp_thread (lwp);
1734
1735 if ((wstat == NULL
1736 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1737 && supports_fast_tracepoints ()
1738 && agent_loaded_p ())
1739 {
1740 struct fast_tpoint_collect_status status;
1741 int r;
1742
1743 if (debug_threads)
1744 debug_printf ("Checking whether LWP %ld needs to move out of the "
1745 "jump pad.\n",
1746 lwpid_of (current_thread));
1747
1748 r = linux_fast_tracepoint_collecting (lwp, &status);
1749
1750 if (wstat == NULL
1751 || (WSTOPSIG (*wstat) != SIGILL
1752 && WSTOPSIG (*wstat) != SIGFPE
1753 && WSTOPSIG (*wstat) != SIGSEGV
1754 && WSTOPSIG (*wstat) != SIGBUS))
1755 {
1756 lwp->collecting_fast_tracepoint = r;
1757
1758 if (r != 0)
1759 {
1760 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1761 {
1762 /* Haven't executed the original instruction yet.
1763 Set breakpoint there, and wait till it's hit,
1764 then single-step until exiting the jump pad. */
1765 lwp->exit_jump_pad_bkpt
1766 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1767 }
1768
1769 if (debug_threads)
1770 debug_printf ("Checking whether LWP %ld needs to move out of "
1771 "the jump pad...it does\n",
1772 lwpid_of (current_thread));
1773 current_thread = saved_thread;
1774
1775 return 1;
1776 }
1777 }
1778 else
1779 {
1780 /* If we get a synchronous signal while collecting, *and*
1781 while executing the (relocated) original instruction,
1782 reset the PC to point at the tpoint address, before
1783 reporting to GDB. Otherwise, it's an IPA lib bug: just
1784 report the signal to GDB, and pray for the best. */
1785
1786 lwp->collecting_fast_tracepoint = 0;
1787
1788 if (r != 0
1789 && (status.adjusted_insn_addr <= lwp->stop_pc
1790 && lwp->stop_pc < status.adjusted_insn_addr_end))
1791 {
1792 siginfo_t info;
1793 struct regcache *regcache;
1794
1795 /* The si_addr on a few signals references the address
1796 of the faulting instruction. Adjust that as
1797 well. */
1798 if ((WSTOPSIG (*wstat) == SIGILL
1799 || WSTOPSIG (*wstat) == SIGFPE
1800 || WSTOPSIG (*wstat) == SIGBUS
1801 || WSTOPSIG (*wstat) == SIGSEGV)
1802 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1803 (PTRACE_TYPE_ARG3) 0, &info) == 0
1804 /* Final check just to make sure we don't clobber
1805 the siginfo of non-kernel-sent signals. */
1806 && (uintptr_t) info.si_addr == lwp->stop_pc)
1807 {
1808 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1809 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1810 (PTRACE_TYPE_ARG3) 0, &info);
1811 }
1812
1813 regcache = get_thread_regcache (current_thread, 1);
1814 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1815 lwp->stop_pc = status.tpoint_addr;
1816
1817 /* Cancel any fast tracepoint lock this thread was
1818 holding. */
1819 force_unlock_trace_buffer ();
1820 }
1821
1822 if (lwp->exit_jump_pad_bkpt != NULL)
1823 {
1824 if (debug_threads)
1825 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1826 "stopping all threads momentarily.\n");
1827
1828 stop_all_lwps (1, lwp);
1829
1830 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1831 lwp->exit_jump_pad_bkpt = NULL;
1832
1833 unstop_all_lwps (1, lwp);
1834
1835 gdb_assert (lwp->suspended >= 0);
1836 }
1837 }
1838 }
1839
1840 if (debug_threads)
1841 debug_printf ("Checking whether LWP %ld needs to move out of the "
1842 "jump pad...no\n",
1843 lwpid_of (current_thread));
1844
1845 current_thread = saved_thread;
1846 return 0;
1847 }
1848
1849 /* Enqueue one signal in the "signals to report later when out of the
1850 jump pad" list. */
1851
1852 static void
1853 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1854 {
1855 struct pending_signals *p_sig;
1856 struct thread_info *thread = get_lwp_thread (lwp);
1857
1858 if (debug_threads)
1859 debug_printf ("Deferring signal %d for LWP %ld.\n",
1860 WSTOPSIG (*wstat), lwpid_of (thread));
1861
1862 if (debug_threads)
1863 {
1864 struct pending_signals *sig;
1865
1866 for (sig = lwp->pending_signals_to_report;
1867 sig != NULL;
1868 sig = sig->prev)
1869 debug_printf (" Already queued %d\n",
1870 sig->signal);
1871
1872 debug_printf (" (no more currently queued signals)\n");
1873 }
1874
1875 /* Don't enqueue non-RT signals if they are already in the deferred
1876 queue. (SIGSTOP being the easiest signal to see ending up here
1877 twice) */
1878 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1879 {
1880 struct pending_signals *sig;
1881
1882 for (sig = lwp->pending_signals_to_report;
1883 sig != NULL;
1884 sig = sig->prev)
1885 {
1886 if (sig->signal == WSTOPSIG (*wstat))
1887 {
1888 if (debug_threads)
1889 debug_printf ("Not requeuing already queued non-RT signal %d"
1890 " for LWP %ld\n",
1891 sig->signal,
1892 lwpid_of (thread));
1893 return;
1894 }
1895 }
1896 }
1897
1898 p_sig = xmalloc (sizeof (*p_sig));
1899 p_sig->prev = lwp->pending_signals_to_report;
1900 p_sig->signal = WSTOPSIG (*wstat);
1901 memset (&p_sig->info, 0, sizeof (siginfo_t));
1902 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1903 &p_sig->info);
1904
1905 lwp->pending_signals_to_report = p_sig;
1906 }
1907
1908 /* Dequeue one signal from the "signals to report later when out of
1909 the jump pad" list. */
1910
1911 static int
1912 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1913 {
1914 struct thread_info *thread = get_lwp_thread (lwp);
1915
1916 if (lwp->pending_signals_to_report != NULL)
1917 {
1918 struct pending_signals **p_sig;
1919
1920 p_sig = &lwp->pending_signals_to_report;
1921 while ((*p_sig)->prev != NULL)
1922 p_sig = &(*p_sig)->prev;
1923
1924 *wstat = W_STOPCODE ((*p_sig)->signal);
1925 if ((*p_sig)->info.si_signo != 0)
1926 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1927 &(*p_sig)->info);
1928 free (*p_sig);
1929 *p_sig = NULL;
1930
1931 if (debug_threads)
1932 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1933 WSTOPSIG (*wstat), lwpid_of (thread));
1934
1935 if (debug_threads)
1936 {
1937 struct pending_signals *sig;
1938
1939 for (sig = lwp->pending_signals_to_report;
1940 sig != NULL;
1941 sig = sig->prev)
1942 debug_printf (" Still queued %d\n",
1943 sig->signal);
1944
1945 debug_printf (" (no more queued signals)\n");
1946 }
1947
1948 return 1;
1949 }
1950
1951 return 0;
1952 }
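
/* A worked example of the two queue routines above, shown here as an
   editorial sketch rather than code that runs in gdbserver.  Suppose
   SIGUSR1 arrives before SIGUSR2 while a thread is in the jump pad:

     enqueue_one_deferred_signal (lwp, &w_usr1);    list: USR1
     enqueue_one_deferred_signal (lwp, &w_usr2);    list: USR2 -> USR1

   Enqueueing pushes at the head through the PREV links, and
   dequeueing walks to the tail, so dequeue_one_deferred_signal
   reports SIGUSR1 first, then SIGUSR2: FIFO order, matching signal
   arrival order.  The reported wait status is rebuilt with
   W_STOPCODE; on x86 Linux, where SIGUSR1 is 10, W_STOPCODE (SIGUSR1)
   is (10 << 8) | 0x7f == 0xa7f, which WIFSTOPPED and WSTOPSIG then
   decode back on the consumer side.  */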
1953
/* Fetch the possibly triggered data watchpoint info and store it in
   CHILD.

   On some archs, like x86, that use debug registers to set
   watchpoints, the only way to know which watched address trapped is
   to read back the register that selects the address to watch.  The
   problem is that, between setting the watchpoint and reading back
   which data address trapped, the user may change the set of
   watchpoints, and, as a consequence, GDB changes the debug
   registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in CHILD the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see CHILD stop with a SIGTRAP.  If GDB changes the
   debug registers meanwhile, we still have the cached data to rely
   on.  */
1968
1969 static int
1970 check_stopped_by_watchpoint (struct lwp_info *child)
1971 {
1972 if (the_low_target.stopped_by_watchpoint != NULL)
1973 {
1974 struct thread_info *saved_thread;
1975
1976 saved_thread = current_thread;
1977 current_thread = get_lwp_thread (child);
1978
1979 if (the_low_target.stopped_by_watchpoint ())
1980 {
1981 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
1982
1983 if (the_low_target.stopped_data_address != NULL)
1984 child->stopped_data_address
1985 = the_low_target.stopped_data_address ();
1986 else
1987 child->stopped_data_address = 0;
1988 }
1989
1990 current_thread = saved_thread;
1991 }
1992
1993 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
1994 }
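
/* The cached stop_reason/stopped_data_address are what the
   target-facing side later hands back to GDB.  A minimal sketch of
   the consuming hooks (names illustrative; the real methods live in
   the target vector):

     static int
     example_stopped_by_watchpoint (void)
     {
       struct lwp_info *lwp = get_thread_lwp (current_thread);

       return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
     }

     static CORE_ADDR
     example_stopped_data_address (void)
     {
       struct lwp_info *lwp = get_thread_lwp (current_thread);

       return lwp->stopped_data_address;
     }

   Answering from the cache rather than from the debug registers is
   what makes the reply immune to GDB rewriting the registers between
   the trap and the query.  */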
1995
1996 /* Return the ptrace options that we want to try to enable. */
1997
1998 static int
1999 linux_low_ptrace_options (int attached)
2000 {
2001 int options = 0;
2002
2003 if (!attached)
2004 options |= PTRACE_O_EXITKILL;
2005
2006 if (report_fork_events)
2007 options |= PTRACE_O_TRACEFORK;
2008
2009 return options;
2010 }
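
/* With PTRACE_O_TRACEFORK enabled, a fork shows up as a SIGTRAP stop
   with the event number folded into the upper bits of the wait
   status.  A decoding sketch, following the standard ptrace(2)
   encoding (shown for illustration only):

     int event = wstat >> 16;
     if (WIFSTOPPED (wstat)
         && WSTOPSIG (wstat) == SIGTRAP
         && event == PTRACE_EVENT_FORK)
       {
         unsigned long new_pid;

         ptrace (PTRACE_GETEVENTMSG, lwpid, (PTRACE_TYPE_ARG3) 0,
                 &new_pid);
       }

   PTRACE_O_EXITKILL is unrelated to event reporting; it makes the
   kernel SIGKILL the tracee if gdbserver dies, which is only wanted
   for processes we spawned ourselves, hence the !attached check.  */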
2011
/* Do low-level handling of the event, and check whether we should go
   on and pass it to caller code.  Return the affected LWP if we
   should, or NULL otherwise.  */
2015
2016 static struct lwp_info *
2017 linux_low_filter_event (int lwpid, int wstat)
2018 {
2019 struct lwp_info *child;
2020 struct thread_info *thread;
2021 int have_stop_pc = 0;
2022
2023 child = find_lwp_pid (pid_to_ptid (lwpid));
2024
2025 /* If we didn't find a process, one of two things presumably happened:
2026 - A process we started and then detached from has exited. Ignore it.
2027 - A process we are controlling has forked and the new child's stop
2028 was reported to us by the kernel. Save its PID. */
2029 if (child == NULL && WIFSTOPPED (wstat))
2030 {
2031 add_to_pid_list (&stopped_pids, lwpid, wstat);
2032 return NULL;
2033 }
2034 else if (child == NULL)
2035 return NULL;
2036
2037 thread = get_lwp_thread (child);
2038
2039 child->stopped = 1;
2040
2041 child->last_status = wstat;
2042
2043 /* Check if the thread has exited. */
2044 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2045 {
2046 if (debug_threads)
2047 debug_printf ("LLFE: %d exited.\n", lwpid);
2048 if (num_lwps (pid_of (thread)) > 1)
2049 {
2050
2051 /* If there is at least one more LWP, then the exit signal was
2052 not the end of the debugged application and should be
2053 ignored. */
2054 delete_lwp (child);
2055 return NULL;
2056 }
2057 else
2058 {
/* This was the last lwp in the process.  Events are
   serialized to the GDB core, and we can't report this one
   right now; but the GDB core and the other target layers
   will want to be notified about the exit code/signal, so
   leave the status pending for the next time we're able to
   report it.  */
2065 mark_lwp_dead (child, wstat);
2066 return child;
2067 }
2068 }
2069
2070 gdb_assert (WIFSTOPPED (wstat));
2071
2072 if (WIFSTOPPED (wstat))
2073 {
2074 struct process_info *proc;
2075
2076 /* Architecture-specific setup after inferior is running. This
2077 needs to happen after we have attached to the inferior and it
2078 is stopped for the first time, but before we access any
2079 inferior registers. */
2080 proc = find_process_pid (pid_of (thread));
2081 if (proc->priv->new_inferior)
2082 {
2083 struct thread_info *saved_thread;
2084
2085 saved_thread = current_thread;
2086 current_thread = thread;
2087
2088 the_low_target.arch_setup ();
2089
2090 current_thread = saved_thread;
2091
2092 proc->priv->new_inferior = 0;
2093 }
2094 }
2095
2096 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2097 {
2098 struct process_info *proc = find_process_pid (pid_of (thread));
2099 int options = linux_low_ptrace_options (proc->attached);
2100
2101 linux_enable_event_reporting (lwpid, options);
2102 child->must_set_ptrace_flags = 0;
2103 }
2104
2105 /* Be careful to not overwrite stop_pc until
2106 check_stopped_by_breakpoint is called. */
2107 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2108 && linux_is_extended_waitstatus (wstat))
2109 {
2110 child->stop_pc = get_pc (child);
2111 if (handle_extended_wait (child, wstat))
2112 {
2113 /* The event has been handled, so just return without
2114 reporting it. */
2115 return NULL;
2116 }
2117 }
2118
2119 /* Check first whether this was a SW/HW breakpoint before checking
2120 watchpoints, because at least s390 can't tell the data address of
2121 hardware watchpoint hits, and returns stopped-by-watchpoint as
2122 long as there's a watchpoint set. */
2123 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2124 {
2125 if (check_stopped_by_breakpoint (child))
2126 have_stop_pc = 1;
2127 }
2128
2129 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2130 or hardware watchpoint. Check which is which if we got
2131 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2132 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2133 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2134 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2135 check_stopped_by_watchpoint (child);
2136
2137 if (!have_stop_pc)
2138 child->stop_pc = get_pc (child);
2139
2140 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2141 && child->stop_expected)
2142 {
2143 if (debug_threads)
2144 debug_printf ("Expected stop.\n");
2145 child->stop_expected = 0;
2146
2147 if (thread->last_resume_kind == resume_stop)
2148 {
2149 /* We want to report the stop to the core. Treat the
2150 SIGSTOP as a normal event. */
2151 if (debug_threads)
2152 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2153 target_pid_to_str (ptid_of (thread)));
2154 }
2155 else if (stopping_threads != NOT_STOPPING_THREADS)
2156 {
2157 /* Stopping threads. We don't want this SIGSTOP to end up
2158 pending. */
2159 if (debug_threads)
2160 debug_printf ("LLW: SIGSTOP caught for %s "
2161 "while stopping threads.\n",
2162 target_pid_to_str (ptid_of (thread)));
2163 return NULL;
2164 }
2165 else
2166 {
2167 /* This is a delayed SIGSTOP. Filter out the event. */
2168 if (debug_threads)
2169 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2170 child->stepping ? "step" : "continue",
2171 target_pid_to_str (ptid_of (thread)));
2172
2173 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2174 return NULL;
2175 }
2176 }
2177
2178 child->status_pending_p = 1;
2179 child->status_pending = wstat;
2180 return child;
2181 }
2182
2183 /* Resume LWPs that are currently stopped without any pending status
2184 to report, but are resumed from the core's perspective. */
2185
2186 static void
2187 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2188 {
2189 struct thread_info *thread = (struct thread_info *) entry;
2190 struct lwp_info *lp = get_thread_lwp (thread);
2191
2192 if (lp->stopped
2193 && !lp->status_pending_p
2194 && thread->last_resume_kind != resume_stop
2195 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2196 {
2197 int step = thread->last_resume_kind == resume_step;
2198
2199 if (debug_threads)
2200 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2201 target_pid_to_str (ptid_of (thread)),
2202 paddress (lp->stop_pc),
2203 step);
2204
2205 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2206 }
2207 }
2208
2209 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2210 match FILTER_PTID (leaving others pending). The PTIDs can be:
2211 minus_one_ptid, to specify any child; a pid PTID, specifying all
2212 lwps of a thread group; or a PTID representing a single lwp. Store
the stop status through the status pointer WSTATP.  OPTIONS is
passed to the waitpid call.  Return 0 if no event was found and
OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
were found.  Return the PID of the stopped child otherwise.  */
2217
2218 static int
2219 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2220 int *wstatp, int options)
2221 {
2222 struct thread_info *event_thread;
2223 struct lwp_info *event_child, *requested_child;
2224 sigset_t block_mask, prev_mask;
2225
2226 retry:
2227 /* N.B. event_thread points to the thread_info struct that contains
2228 event_child. Keep them in sync. */
2229 event_thread = NULL;
2230 event_child = NULL;
2231 requested_child = NULL;
2232
2233 /* Check for a lwp with a pending status. */
2234
2235 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2236 {
2237 event_thread = (struct thread_info *)
2238 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2239 if (event_thread != NULL)
2240 event_child = get_thread_lwp (event_thread);
2241 if (debug_threads && event_thread)
2242 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2243 }
2244 else if (!ptid_equal (filter_ptid, null_ptid))
2245 {
2246 requested_child = find_lwp_pid (filter_ptid);
2247
2248 if (stopping_threads == NOT_STOPPING_THREADS
2249 && requested_child->status_pending_p
2250 && requested_child->collecting_fast_tracepoint)
2251 {
2252 enqueue_one_deferred_signal (requested_child,
2253 &requested_child->status_pending);
2254 requested_child->status_pending_p = 0;
2255 requested_child->status_pending = 0;
2256 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2257 }
2258
2259 if (requested_child->suspended
2260 && requested_child->status_pending_p)
2261 {
2262 internal_error (__FILE__, __LINE__,
2263 "requesting an event out of a"
2264 " suspended child?");
2265 }
2266
2267 if (requested_child->status_pending_p)
2268 {
2269 event_child = requested_child;
2270 event_thread = get_lwp_thread (event_child);
2271 }
2272 }
2273
2274 if (event_child != NULL)
2275 {
2276 if (debug_threads)
2277 debug_printf ("Got an event from pending child %ld (%04x)\n",
2278 lwpid_of (event_thread), event_child->status_pending);
2279 *wstatp = event_child->status_pending;
2280 event_child->status_pending_p = 0;
2281 event_child->status_pending = 0;
2282 current_thread = event_thread;
2283 return lwpid_of (event_thread);
2284 }
2285
2286 /* But if we don't find a pending event, we'll have to wait.
2287
2288 We only enter this loop if no process has a pending wait status.
2289 Thus any action taken in response to a wait status inside this
2290 loop is responding as soon as we detect the status, not after any
2291 pending events. */
2292
2293 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2294 all signals while here. */
2295 sigfillset (&block_mask);
2296 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2297
2298 /* Always pull all events out of the kernel. We'll randomly select
2299 an event LWP out of all that have events, to prevent
2300 starvation. */
2301 while (event_child == NULL)
2302 {
2303 pid_t ret = 0;
2304
/* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
   quirks:
2307
2308 - If the thread group leader exits while other threads in the
2309 thread group still exist, waitpid(TGID, ...) hangs. That
2310 waitpid won't return an exit status until the other threads
2311 in the group are reaped.
2312
2313 - When a non-leader thread execs, that thread just vanishes
2314 without reporting an exit (so we'd hang if we waited for it
2315 explicitly in that case). The exec event is reported to
2316 the TGID pid (although we don't currently enable exec
2317 events). */
2318 errno = 0;
2319 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2320
2321 if (debug_threads)
2322 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2323 ret, errno ? strerror (errno) : "ERRNO-OK");
2324
2325 if (ret > 0)
2326 {
2327 if (debug_threads)
2328 {
2329 debug_printf ("LLW: waitpid %ld received %s\n",
2330 (long) ret, status_to_str (*wstatp));
2331 }
2332
2333 /* Filter all events. IOW, leave all events pending. We'll
2334 randomly select an event LWP out of all that have events
2335 below. */
2336 linux_low_filter_event (ret, *wstatp);
2337 /* Retry until nothing comes out of waitpid. A single
2338 SIGCHLD can indicate more than one child stopped. */
2339 continue;
2340 }
2341
2342 /* Now that we've pulled all events out of the kernel, resume
2343 LWPs that don't have an interesting event to report. */
2344 if (stopping_threads == NOT_STOPPING_THREADS)
2345 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2346
2347 /* ... and find an LWP with a status to report to the core, if
2348 any. */
2349 event_thread = (struct thread_info *)
2350 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2351 if (event_thread != NULL)
2352 {
2353 event_child = get_thread_lwp (event_thread);
2354 *wstatp = event_child->status_pending;
2355 event_child->status_pending_p = 0;
2356 event_child->status_pending = 0;
2357 break;
2358 }
2359
2360 /* Check for zombie thread group leaders. Those can't be reaped
2361 until all other threads in the thread group are. */
2362 check_zombie_leaders ();
2363
2364 /* If there are no resumed children left in the set of LWPs we
2365 want to wait for, bail. We can't just block in
2366 waitpid/sigsuspend, because lwps might have been left stopped
2367 in trace-stop state, and we'd be stuck forever waiting for
2368 their status to change (which would only happen if we resumed
2369 them). Even if WNOHANG is set, this return code is preferred
2370 over 0 (below), as it is more detailed. */
2371 if ((find_inferior (&all_threads,
2372 not_stopped_callback,
2373 &wait_ptid) == NULL))
2374 {
2375 if (debug_threads)
2376 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2377 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2378 return -1;
2379 }
2380
2381 /* No interesting event to report to the caller. */
2382 if ((options & WNOHANG))
2383 {
2384 if (debug_threads)
2385 debug_printf ("WNOHANG set, no event found\n");
2386
2387 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2388 return 0;
2389 }
2390
2391 /* Block until we get an event reported with SIGCHLD. */
2392 if (debug_threads)
2393 debug_printf ("sigsuspend'ing\n");
2394
2395 sigsuspend (&prev_mask);
2396 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2397 goto retry;
2398 }
2399
2400 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2401
2402 current_thread = event_thread;
2403
2404 /* Check for thread exit. */
2405 if (! WIFSTOPPED (*wstatp))
2406 {
2407 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2408
2409 if (debug_threads)
2410 debug_printf ("LWP %d is the last lwp of process. "
2411 "Process %ld exiting.\n",
2412 pid_of (event_thread), lwpid_of (event_thread));
2413 return lwpid_of (event_thread);
2414 }
2415
2416 return lwpid_of (event_thread);
2417 }
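
/* Stripped of gdbserver specifics, the drain-then-sleep structure
   above is the general pattern for multi-threaded waitpid use.  A
   self-contained sketch, assuming a SIGCHLD handler is installed and
   that record_event/have_recorded_event are placeholders (error
   handling elided):

     sigset_t block_mask, prev_mask;

     sigfillset (&block_mask);
     sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
     for (;;)
       {
         pid_t pid;
         int status;

         while ((pid = waitpid (-1, &status, __WALL | WNOHANG)) > 0)
           record_event (pid, status);
         if (have_recorded_event ())
           break;
         sigsuspend (&prev_mask);
       }
     sigprocmask (SIG_SETMASK, &prev_mask, NULL);

   Blocking SIGCHLD before the WNOHANG drain and atomically unblocking
   it inside sigsuspend closes the race where a SIGCHLD arrives
   between "no events found" and "go to sleep": the pending signal
   fires as soon as sigsuspend swaps the mask in.  */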
2418
2419 /* Wait for an event from child(ren) PTID. PTIDs can be:
2420 minus_one_ptid, to specify any child; a pid PTID, specifying all
2421 lwps of a thread group; or a PTID representing a single lwp. Store
the stop status through the status pointer WSTATP.  OPTIONS is
passed to the waitpid call.  Return 0 if no event was found and
OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
were found.  Return the PID of the stopped child otherwise.  */
2426
2427 static int
2428 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2429 {
2430 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2431 }
2432
/* Count the LWPs that have had events.  */
2434
2435 static int
2436 count_events_callback (struct inferior_list_entry *entry, void *data)
2437 {
2438 struct thread_info *thread = (struct thread_info *) entry;
2439 struct lwp_info *lp = get_thread_lwp (thread);
2440 int *count = data;
2441
2442 gdb_assert (count != NULL);
2443
2444 /* Count only resumed LWPs that have an event pending. */
2445 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2446 && lp->status_pending_p)
2447 (*count)++;
2448
2449 return 0;
2450 }
2451
2452 /* Select the LWP (if any) that is currently being single-stepped. */
2453
2454 static int
2455 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2456 {
2457 struct thread_info *thread = (struct thread_info *) entry;
2458 struct lwp_info *lp = get_thread_lwp (thread);
2459
2460 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2461 && thread->last_resume_kind == resume_step
2462 && lp->status_pending_p)
2463 return 1;
2464 else
2465 return 0;
2466 }
2467
2468 /* Select the Nth LWP that has had an event. */
2469
2470 static int
2471 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2472 {
2473 struct thread_info *thread = (struct thread_info *) entry;
2474 struct lwp_info *lp = get_thread_lwp (thread);
2475 int *selector = data;
2476
2477 gdb_assert (selector != NULL);
2478
2479 /* Select only resumed LWPs that have an event pending. */
2480 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2481 && lp->status_pending_p)
2482 if ((*selector)-- == 0)
2483 return 1;
2484
2485 return 0;
2486 }
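
/* The three callbacks above all follow the find_inferior contract:
   return zero to keep iterating, non-zero to stop and have
   find_inferior return the current entry.  For instance, picking the
   third pending event would presumably look like:

     int selector = 2;
     struct thread_info *thread
       = (struct thread_info *) find_inferior (&all_threads,
                                               select_event_lwp_callback,
                                               &selector);

   SELECTOR counts down inside the callback, so selector 0 picks the
   first resumed LWP with a pending status.  */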
2487
2488 /* Select one LWP out of those that have events pending. */
2489
2490 static void
2491 select_event_lwp (struct lwp_info **orig_lp)
2492 {
2493 int num_events = 0;
2494 int random_selector;
2495 struct thread_info *event_thread = NULL;
2496
2497 /* In all-stop, give preference to the LWP that is being
2498 single-stepped. There will be at most one, and it's the LWP that
2499 the core is most interested in. If we didn't do this, then we'd
2500 have to handle pending step SIGTRAPs somehow in case the core
2501 later continues the previously-stepped thread, otherwise we'd
2502 report the pending SIGTRAP, and the core, not having stepped the
2503 thread, wouldn't understand what the trap was for, and therefore
2504 would report it to the user as a random signal. */
2505 if (!non_stop)
2506 {
2507 event_thread
2508 = (struct thread_info *) find_inferior (&all_threads,
2509 select_singlestep_lwp_callback,
2510 NULL);
2511 if (event_thread != NULL)
2512 {
2513 if (debug_threads)
2514 debug_printf ("SEL: Select single-step %s\n",
2515 target_pid_to_str (ptid_of (event_thread)));
2516 }
2517 }
2518 if (event_thread == NULL)
2519 {
2520 /* No single-stepping LWP. Select one at random, out of those
2521 which have had events. */
2522
2523 /* First see how many events we have. */
2524 find_inferior (&all_threads, count_events_callback, &num_events);
2525 gdb_assert (num_events > 0);
2526
2527 /* Now randomly pick a LWP out of those that have had
2528 events. */
2529 random_selector = (int)
2530 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2531
2532 if (debug_threads && num_events > 1)
2533 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2534 num_events, random_selector);
2535
2536 event_thread
2537 = (struct thread_info *) find_inferior (&all_threads,
2538 select_event_lwp_callback,
2539 &random_selector);
2540 }
2541
2542 if (event_thread != NULL)
2543 {
2544 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2545
2546 /* Switch the event LWP. */
2547 *orig_lp = event_lp;
2548 }
2549 }
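
/* The selection formula above deserves a worked example.  With
   NUM_EVENTS == 3, the value (3 * (double) rand ()) / (RAND_MAX + 1.0)
   lies in [0, 3), so truncation to int yields 0, 1 or 2 with roughly
   equal probability.  Dividing by RAND_MAX + 1.0 rather than RAND_MAX
   keeps the result strictly below NUM_EVENTS, so the countdown in
   select_event_lwp_callback always lands on a valid entry.  */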
2550
2551 /* Decrement the suspend count of an LWP. */
2552
2553 static int
2554 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2555 {
2556 struct thread_info *thread = (struct thread_info *) entry;
2557 struct lwp_info *lwp = get_thread_lwp (thread);
2558
2559 /* Ignore EXCEPT. */
2560 if (lwp == except)
2561 return 0;
2562
2563 lwp->suspended--;
2564
2565 gdb_assert (lwp->suspended >= 0);
2566 return 0;
2567 }
2568
/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */
2571
2572 static void
2573 unsuspend_all_lwps (struct lwp_info *except)
2574 {
2575 find_inferior (&all_threads, unsuspend_one_lwp, except);
2576 }
2577
2578 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2579 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2580 void *data);
2581 static int lwp_running (struct inferior_list_entry *entry, void *data);
2582 static ptid_t linux_wait_1 (ptid_t ptid,
2583 struct target_waitstatus *ourstatus,
2584 int target_options);
2585
2586 /* Stabilize threads (move out of jump pads).
2587
2588 If a thread is midway collecting a fast tracepoint, we need to
2589 finish the collection and move it out of the jump pad before
2590 reporting the signal.
2591
2592 This avoids recursion while collecting (when a signal arrives
2593 midway, and the signal handler itself collects), which would trash
the trace buffer.  In case the user set a breakpoint in a signal
handler, this avoids the backtrace showing the jump pad, etc.
Most importantly, there are certain things we can't do safely if
threads are stopped in a jump pad (or in its callees).  For
2598 example:
2599
- starting a new trace run.  A thread still collecting the
previous run could trash the trace buffer when resumed.  The trace
buffer control structures would have been reset but the thread had
no way to tell.  The thread could even be midway through memcpy'ing
into the buffer, which would mean that when resumed, it would clobber
the trace buffer that had been set up for the new run.
2606
2607 - we can't rewrite/reuse the jump pads for new tracepoints
safely.  Say you do tstart while a thread is stopped midway through
a collection.  When the thread is later resumed, it finishes the
2610 collection, and returns to the jump pad, to execute the original
2611 instruction that was under the tracepoint jump at the time the
2612 older run had been started. If the jump pad had been rewritten
2613 since for something else in the new run, the thread would now
2614 execute the wrong / random instructions. */
2615
2616 static void
2617 linux_stabilize_threads (void)
2618 {
2619 struct thread_info *saved_thread;
2620 struct thread_info *thread_stuck;
2621
2622 thread_stuck
2623 = (struct thread_info *) find_inferior (&all_threads,
2624 stuck_in_jump_pad_callback,
2625 NULL);
2626 if (thread_stuck != NULL)
2627 {
2628 if (debug_threads)
2629 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2630 lwpid_of (thread_stuck));
2631 return;
2632 }
2633
2634 saved_thread = current_thread;
2635
2636 stabilizing_threads = 1;
2637
2638 /* Kick 'em all. */
2639 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2640
2641 /* Loop until all are stopped out of the jump pads. */
2642 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2643 {
2644 struct target_waitstatus ourstatus;
2645 struct lwp_info *lwp;
2646 int wstat;
2647
/* Note that we go through the full wait event loop.  While
   moving threads out of the jump pad, we need to be able to step
   over internal breakpoints and such.  */
2651 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2652
2653 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2654 {
2655 lwp = get_thread_lwp (current_thread);
2656
2657 /* Lock it. */
2658 lwp->suspended++;
2659
2660 if (ourstatus.value.sig != GDB_SIGNAL_0
2661 || current_thread->last_resume_kind == resume_stop)
2662 {
2663 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2664 enqueue_one_deferred_signal (lwp, &wstat);
2665 }
2666 }
2667 }
2668
2669 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2670
2671 stabilizing_threads = 0;
2672
2673 current_thread = saved_thread;
2674
2675 if (debug_threads)
2676 {
2677 thread_stuck
2678 = (struct thread_info *) find_inferior (&all_threads,
2679 stuck_in_jump_pad_callback,
2680 NULL);
2681 if (thread_stuck != NULL)
2682 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2683 lwpid_of (thread_stuck));
2684 }
2685 }
2686
2687 static void async_file_mark (void);
2688
2689 /* Convenience function that is called when the kernel reports an
2690 event that is not passed out to GDB. */
2691
2692 static ptid_t
2693 ignore_event (struct target_waitstatus *ourstatus)
2694 {
2695 /* If we got an event, there may still be others, as a single
2696 SIGCHLD can indicate more than one child stopped. This forces
2697 another target_wait call. */
2698 async_file_mark ();
2699
2700 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2701 return null_ptid;
2702 }
2703
2704 /* Return non-zero if WAITSTATUS reflects an extended linux
2705 event. Otherwise, return zero. */
2706
2707 static int
2708 extended_event_reported (const struct target_waitstatus *waitstatus)
2709 {
2710 if (waitstatus == NULL)
2711 return 0;
2712
2713 return (waitstatus->kind == TARGET_WAITKIND_FORKED);
2714 }
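
/* Only TARGET_WAITKIND_FORKED qualifies for now, matching the lone
   PTRACE_O_TRACEFORK flag requested in linux_low_ptrace_options.  If
   vfork or exec reporting were wired up as well, this predicate
   would presumably grow the corresponding kinds, something like:

     return (waitstatus->kind == TARGET_WAITKIND_FORKED
             || waitstatus->kind == TARGET_WAITKIND_VFORKED
             || waitstatus->kind == TARGET_WAITKIND_EXECD);

   The extra enumerators are shown speculatively; only the fork case
   is handled in this file.  */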
2715
/* Wait for a child process event and return its status.  */
2717
2718 static ptid_t
2719 linux_wait_1 (ptid_t ptid,
2720 struct target_waitstatus *ourstatus, int target_options)
2721 {
2722 int w;
2723 struct lwp_info *event_child;
2724 int options;
2725 int pid;
2726 int step_over_finished;
2727 int bp_explains_trap;
2728 int maybe_internal_trap;
2729 int report_to_gdb;
2730 int trace_event;
2731 int in_step_range;
2732
2733 if (debug_threads)
2734 {
2735 debug_enter ();
2736 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2737 }
2738
2739 /* Translate generic target options into linux options. */
2740 options = __WALL;
2741 if (target_options & TARGET_WNOHANG)
2742 options |= WNOHANG;
2743
2744 bp_explains_trap = 0;
2745 trace_event = 0;
2746 in_step_range = 0;
2747 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2748
2749 if (ptid_equal (step_over_bkpt, null_ptid))
2750 pid = linux_wait_for_event (ptid, &w, options);
2751 else
2752 {
2753 if (debug_threads)
2754 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2755 target_pid_to_str (step_over_bkpt));
2756 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2757 }
2758
2759 if (pid == 0)
2760 {
2761 gdb_assert (target_options & TARGET_WNOHANG);
2762
2763 if (debug_threads)
2764 {
2765 debug_printf ("linux_wait_1 ret = null_ptid, "
2766 "TARGET_WAITKIND_IGNORE\n");
2767 debug_exit ();
2768 }
2769
2770 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2771 return null_ptid;
2772 }
2773 else if (pid == -1)
2774 {
2775 if (debug_threads)
2776 {
2777 debug_printf ("linux_wait_1 ret = null_ptid, "
2778 "TARGET_WAITKIND_NO_RESUMED\n");
2779 debug_exit ();
2780 }
2781
2782 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2783 return null_ptid;
2784 }
2785
2786 event_child = get_thread_lwp (current_thread);
2787
2788 /* linux_wait_for_event only returns an exit status for the last
2789 child of a process. Report it. */
2790 if (WIFEXITED (w) || WIFSIGNALED (w))
2791 {
2792 if (WIFEXITED (w))
2793 {
2794 ourstatus->kind = TARGET_WAITKIND_EXITED;
2795 ourstatus->value.integer = WEXITSTATUS (w);
2796
2797 if (debug_threads)
2798 {
2799 debug_printf ("linux_wait_1 ret = %s, exited with "
2800 "retcode %d\n",
2801 target_pid_to_str (ptid_of (current_thread)),
2802 WEXITSTATUS (w));
2803 debug_exit ();
2804 }
2805 }
2806 else
2807 {
2808 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2809 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2810
2811 if (debug_threads)
2812 {
2813 debug_printf ("linux_wait_1 ret = %s, terminated with "
2814 "signal %d\n",
2815 target_pid_to_str (ptid_of (current_thread)),
2816 WTERMSIG (w));
2817 debug_exit ();
2818 }
2819 }
2820
2821 return ptid_of (current_thread);
2822 }
2823
2824 /* If step-over executes a breakpoint instruction, it means a
2825 gdb/gdbserver breakpoint had been planted on top of a permanent
2826 breakpoint. The PC has been adjusted by
2827 check_stopped_by_breakpoint to point at the breakpoint address.
2828 Advance the PC manually past the breakpoint, otherwise the
2829 program would keep trapping the permanent breakpoint forever. */
2830 if (!ptid_equal (step_over_bkpt, null_ptid)
2831 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2832 {
2833 unsigned int increment_pc = the_low_target.breakpoint_len;
2834
2835 if (debug_threads)
2836 {
2837 debug_printf ("step-over for %s executed software breakpoint\n",
2838 target_pid_to_str (ptid_of (current_thread)));
2839 }
2840
2841 if (increment_pc != 0)
2842 {
2843 struct regcache *regcache
2844 = get_thread_regcache (current_thread, 1);
2845
2846 event_child->stop_pc += increment_pc;
2847 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2848
2849 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2850 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2851 }
2852 }
2853
2854 /* If this event was not handled before, and is not a SIGTRAP, we
2855 report it. SIGILL and SIGSEGV are also treated as traps in case
2856 a breakpoint is inserted at the current PC. If this target does
2857 not support internal breakpoints at all, we also report the
2858 SIGTRAP without further processing; it's of no concern to us. */
2859 maybe_internal_trap
2860 = (supports_breakpoints ()
2861 && (WSTOPSIG (w) == SIGTRAP
2862 || ((WSTOPSIG (w) == SIGILL
2863 || WSTOPSIG (w) == SIGSEGV)
2864 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2865
2866 if (maybe_internal_trap)
2867 {
2868 /* Handle anything that requires bookkeeping before deciding to
2869 report the event or continue waiting. */
2870
2871 /* First check if we can explain the SIGTRAP with an internal
2872 breakpoint, or if we should possibly report the event to GDB.
2873 Do this before anything that may remove or insert a
2874 breakpoint. */
2875 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2876
2877 /* We have a SIGTRAP, possibly a step-over dance has just
2878 finished. If so, tweak the state machine accordingly,
2879 reinsert breakpoints and delete any reinsert (software
2880 single-step) breakpoints. */
2881 step_over_finished = finish_step_over (event_child);
2882
2883 /* Now invoke the callbacks of any internal breakpoints there. */
2884 check_breakpoints (event_child->stop_pc);
2885
2886 /* Handle tracepoint data collecting. This may overflow the
2887 trace buffer, and cause a tracing stop, removing
2888 breakpoints. */
2889 trace_event = handle_tracepoints (event_child);
2890
2891 if (bp_explains_trap)
2892 {
2893 /* If we stepped or ran into an internal breakpoint, we've
2894 already handled it. So next time we resume (from this
2895 PC), we should step over it. */
2896 if (debug_threads)
2897 debug_printf ("Hit a gdbserver breakpoint.\n");
2898
2899 if (breakpoint_here (event_child->stop_pc))
2900 event_child->need_step_over = 1;
2901 }
2902 }
2903 else
2904 {
2905 /* We have some other signal, possibly a step-over dance was in
2906 progress, and it should be cancelled too. */
2907 step_over_finished = finish_step_over (event_child);
2908 }
2909
2910 /* We have all the data we need. Either report the event to GDB, or
2911 resume threads and keep waiting for more. */
2912
2913 /* If we're collecting a fast tracepoint, finish the collection and
2914 move out of the jump pad before delivering a signal. See
2915 linux_stabilize_threads. */
2916
2917 if (WIFSTOPPED (w)
2918 && WSTOPSIG (w) != SIGTRAP
2919 && supports_fast_tracepoints ()
2920 && agent_loaded_p ())
2921 {
2922 if (debug_threads)
2923 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2924 "to defer or adjust it.\n",
2925 WSTOPSIG (w), lwpid_of (current_thread));
2926
2927 /* Allow debugging the jump pad itself. */
2928 if (current_thread->last_resume_kind != resume_step
2929 && maybe_move_out_of_jump_pad (event_child, &w))
2930 {
2931 enqueue_one_deferred_signal (event_child, &w);
2932
2933 if (debug_threads)
2934 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2935 WSTOPSIG (w), lwpid_of (current_thread));
2936
2937 linux_resume_one_lwp (event_child, 0, 0, NULL);
2938
2939 return ignore_event (ourstatus);
2940 }
2941 }
2942
2943 if (event_child->collecting_fast_tracepoint)
2944 {
2945 if (debug_threads)
2946 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2947 "Check if we're already there.\n",
2948 lwpid_of (current_thread),
2949 event_child->collecting_fast_tracepoint);
2950
2951 trace_event = 1;
2952
2953 event_child->collecting_fast_tracepoint
2954 = linux_fast_tracepoint_collecting (event_child, NULL);
2955
2956 if (event_child->collecting_fast_tracepoint != 1)
2957 {
2958 /* No longer need this breakpoint. */
2959 if (event_child->exit_jump_pad_bkpt != NULL)
2960 {
2961 if (debug_threads)
debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
              "stopping all threads momentarily.\n");
2964
2965 /* Other running threads could hit this breakpoint.
We don't handle moribund locations like GDB does;
instead we always pause all threads when removing
2968 breakpoints, so that any step-over or
2969 decr_pc_after_break adjustment is always taken
2970 care of while the breakpoint is still
2971 inserted. */
2972 stop_all_lwps (1, event_child);
2973
2974 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2975 event_child->exit_jump_pad_bkpt = NULL;
2976
2977 unstop_all_lwps (1, event_child);
2978
2979 gdb_assert (event_child->suspended >= 0);
2980 }
2981 }
2982
2983 if (event_child->collecting_fast_tracepoint == 0)
2984 {
2985 if (debug_threads)
2986 debug_printf ("fast tracepoint finished "
2987 "collecting successfully.\n");
2988
2989 /* We may have a deferred signal to report. */
2990 if (dequeue_one_deferred_signal (event_child, &w))
2991 {
2992 if (debug_threads)
2993 debug_printf ("dequeued one signal.\n");
2994 }
2995 else
2996 {
2997 if (debug_threads)
2998 debug_printf ("no deferred signals.\n");
2999
3000 if (stabilizing_threads)
3001 {
3002 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3003 ourstatus->value.sig = GDB_SIGNAL_0;
3004
3005 if (debug_threads)
3006 {
3007 debug_printf ("linux_wait_1 ret = %s, stopped "
3008 "while stabilizing threads\n",
3009 target_pid_to_str (ptid_of (current_thread)));
3010 debug_exit ();
3011 }
3012
3013 return ptid_of (current_thread);
3014 }
3015 }
3016 }
3017 }
3018
3019 /* Check whether GDB would be interested in this event. */
3020
3021 /* If GDB is not interested in this signal, don't stop other
3022 threads, and don't report it to GDB. Just resume the inferior
3023 right away. We do this for threading-related signals as well as
3024 any that GDB specifically requested we ignore. But never ignore
3025 SIGSTOP if we sent it ourselves, and do not ignore signals when
3026 stepping - they may require special handling to skip the signal
3027 handler. Also never ignore signals that could be caused by a
3028 breakpoint. */
3029 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3030 thread library? */
3031 if (WIFSTOPPED (w)
3032 && current_thread->last_resume_kind != resume_step
3033 && (
3034 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3035 (current_process ()->priv->thread_db != NULL
3036 && (WSTOPSIG (w) == __SIGRTMIN
3037 || WSTOPSIG (w) == __SIGRTMIN + 1))
3038 ||
3039 #endif
3040 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3041 && !(WSTOPSIG (w) == SIGSTOP
3042 && current_thread->last_resume_kind == resume_stop)
3043 && !linux_wstatus_maybe_breakpoint (w))))
3044 {
3045 siginfo_t info, *info_p;
3046
3047 if (debug_threads)
3048 debug_printf ("Ignored signal %d for LWP %ld.\n",
3049 WSTOPSIG (w), lwpid_of (current_thread));
3050
3051 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3052 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3053 info_p = &info;
3054 else
3055 info_p = NULL;
3056 linux_resume_one_lwp (event_child, event_child->stepping,
3057 WSTOPSIG (w), info_p);
3058 return ignore_event (ourstatus);
3059 }
3060
3061 /* Note that all addresses are always "out of the step range" when
3062 there's no range to begin with. */
3063 in_step_range = lwp_in_step_range (event_child);
3064
3065 /* If GDB wanted this thread to single step, and the thread is out
3066 of the step range, we always want to report the SIGTRAP, and let
3067 GDB handle it. Watchpoints should always be reported. So should
3068 signals we can't explain. A SIGTRAP we can't explain could be a
GDB breakpoint --- we may or may not support Z0 breakpoints.  If we
do, we'd be able to handle GDB breakpoints on top of internal
breakpoints, by handling the internal breakpoint and still
reporting the event to GDB.  If we don't, we're out of luck; GDB
won't see the breakpoint hit.  */
3074 report_to_gdb = (!maybe_internal_trap
3075 || (current_thread->last_resume_kind == resume_step
3076 && !in_step_range)
3077 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3078 || (!step_over_finished && !in_step_range
3079 && !bp_explains_trap && !trace_event)
3080 || (gdb_breakpoint_here (event_child->stop_pc)
3081 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3082 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3083 || extended_event_reported (&event_child->waitstatus));
3084
3085 run_breakpoint_commands (event_child->stop_pc);
3086
3087 /* We found no reason GDB would want us to stop. We either hit one
3088 of our own breakpoints, or finished an internal step GDB
3089 shouldn't know about. */
3090 if (!report_to_gdb)
3091 {
3092 if (debug_threads)
3093 {
3094 if (bp_explains_trap)
3095 debug_printf ("Hit a gdbserver breakpoint.\n");
3096 if (step_over_finished)
3097 debug_printf ("Step-over finished.\n");
3098 if (trace_event)
3099 debug_printf ("Tracepoint event.\n");
3100 if (lwp_in_step_range (event_child))
3101 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3102 paddress (event_child->stop_pc),
3103 paddress (event_child->step_range_start),
3104 paddress (event_child->step_range_end));
3105 if (extended_event_reported (&event_child->waitstatus))
3106 {
3107 char *str = target_waitstatus_to_string (ourstatus);
3108 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3109 lwpid_of (get_lwp_thread (event_child)), str);
3110 xfree (str);
3111 }
3112 }
3113
3114 /* We're not reporting this breakpoint to GDB, so apply the
3115 decr_pc_after_break adjustment to the inferior's regcache
3116 ourselves. */
3117
3118 if (the_low_target.set_pc != NULL)
3119 {
3120 struct regcache *regcache
3121 = get_thread_regcache (current_thread, 1);
3122 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3123 }
3124
3125 /* We may have finished stepping over a breakpoint. If so,
3126 we've stopped and suspended all LWPs momentarily except the
3127 stepping one. This is where we resume them all again. We're
3128 going to keep waiting, so use proceed, which handles stepping
3129 over the next breakpoint. */
3130 if (debug_threads)
3131 debug_printf ("proceeding all threads.\n");
3132
3133 if (step_over_finished)
3134 unsuspend_all_lwps (event_child);
3135
3136 proceed_all_lwps ();
3137 return ignore_event (ourstatus);
3138 }
3139
3140 if (debug_threads)
3141 {
3142 if (current_thread->last_resume_kind == resume_step)
3143 {
3144 if (event_child->step_range_start == event_child->step_range_end)
3145 debug_printf ("GDB wanted to single-step, reporting event.\n");
3146 else if (!lwp_in_step_range (event_child))
3147 debug_printf ("Out of step range, reporting event.\n");
3148 }
3149 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3150 debug_printf ("Stopped by watchpoint.\n");
3151 else if (gdb_breakpoint_here (event_child->stop_pc))
3152 debug_printf ("Stopped by GDB breakpoint.\n");
debug_printf ("Hit a non-gdbserver trap event.\n");
3155 }
3156
3157 /* Alright, we're going to report a stop. */
3158
3159 if (!stabilizing_threads)
3160 {
3161 /* In all-stop, stop all threads. */
3162 if (!non_stop)
3163 stop_all_lwps (0, NULL);
3164
3165 /* If we're not waiting for a specific LWP, choose an event LWP
3166 from among those that have had events. Giving equal priority
3167 to all LWPs that have had events helps prevent
3168 starvation. */
3169 if (ptid_equal (ptid, minus_one_ptid))
3170 {
3171 event_child->status_pending_p = 1;
3172 event_child->status_pending = w;
3173
3174 select_event_lwp (&event_child);
3175
3176 /* current_thread and event_child must stay in sync. */
3177 current_thread = get_lwp_thread (event_child);
3178
3179 event_child->status_pending_p = 0;
3180 w = event_child->status_pending;
3181 }
3182
3183 if (step_over_finished)
3184 {
3185 if (!non_stop)
3186 {
3187 /* If we were doing a step-over, all other threads but
3188 the stepping one had been paused in start_step_over,
3189 with their suspend counts incremented. We don't want
3190 to do a full unstop/unpause, because we're in
3191 all-stop mode (so we want threads stopped), but we
3192 still need to unsuspend the other threads, to
3193 decrement their `suspended' count back. */
3194 unsuspend_all_lwps (event_child);
3195 }
3196 else
3197 {
3198 /* If we just finished a step-over, then all threads had
3199 been momentarily paused. In all-stop, that's fine,
3200 we want threads stopped by now anyway. In non-stop,
3201 we need to re-resume threads that GDB wanted to be
3202 running. */
3203 unstop_all_lwps (1, event_child);
3204 }
3205 }
3206
3207 /* Stabilize threads (move out of jump pads). */
3208 if (!non_stop)
3209 stabilize_threads ();
3210 }
3211 else
3212 {
3213 /* If we just finished a step-over, then all threads had been
3214 momentarily paused. In all-stop, that's fine, we want
3215 threads stopped by now anyway. In non-stop, we need to
3216 re-resume threads that GDB wanted to be running. */
3217 if (step_over_finished)
3218 unstop_all_lwps (1, event_child);
3219 }
3220
3221 if (extended_event_reported (&event_child->waitstatus))
3222 {
3223 /* If the reported event is a fork, vfork or exec, let GDB know. */
3224 ourstatus->kind = event_child->waitstatus.kind;
3225 ourstatus->value = event_child->waitstatus.value;
3226
3227 /* Clear the event lwp's waitstatus since we handled it already. */
3228 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3229 }
3230 else
3231 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3232
3233 /* Now that we've selected our final event LWP, un-adjust its PC if
3234 it was a software breakpoint, and the client doesn't know we can
3235 adjust the breakpoint ourselves. */
3236 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3237 && !swbreak_feature)
3238 {
3239 int decr_pc = the_low_target.decr_pc_after_break;
3240
3241 if (decr_pc != 0)
3242 {
3243 struct regcache *regcache
3244 = get_thread_regcache (current_thread, 1);
3245 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3246 }
3247 }
3248
3249 if (current_thread->last_resume_kind == resume_stop
3250 && WSTOPSIG (w) == SIGSTOP)
3251 {
/* A thread that has been requested to stop by GDB with vCont;t
   stopped cleanly, so report it as SIG0.  The use of SIGSTOP
   is an implementation detail.  */
3255 ourstatus->value.sig = GDB_SIGNAL_0;
3256 }
3257 else if (current_thread->last_resume_kind == resume_stop
3258 && WSTOPSIG (w) != SIGSTOP)
3259 {
/* A thread that has been requested to stop by GDB with vCont;t,
   but it stopped for some other reason.  */
3262 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3263 }
3264 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3265 {
3266 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3267 }
3268
3269 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3270
3271 if (debug_threads)
3272 {
3273 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3274 target_pid_to_str (ptid_of (current_thread)),
3275 ourstatus->kind, ourstatus->value.sig);
3276 debug_exit ();
3277 }
3278
3279 return ptid_of (current_thread);
3280 }
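
/* A concrete example of the swbreak un-adjustment near the end of
   linux_wait_1, assuming an x86-like target where the breakpoint
   instruction (int3) is one byte and the kernel reports the PC just
   past it.  A breakpoint is planted at 0x1000; the inferior executes
   the 0xcc byte and the kernel reports PC == 0x1001;
   check_stopped_by_breakpoint has already rewound stop_pc to 0x1000.
   If the client never announced "swbreak+", it expects the raw
   kernel-reported PC and will apply its own decr_pc_after_break, so
   we undo our adjustment:

     (*the_low_target.set_pc) (regcache, 0x1000 + decr_pc);

   which, with decr_pc == 1, puts the PC back at 0x1001.  A client
   that does understand swbreak gets the adjusted PC plus an explicit
   stop reason instead, and no un-adjustment happens.  */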
3281
3282 /* Get rid of any pending event in the pipe. */
3283 static void
3284 async_file_flush (void)
3285 {
3286 int ret;
3287 char buf;
3288
3289 do
3290 ret = read (linux_event_pipe[0], &buf, 1);
3291 while (ret >= 0 || (ret == -1 && errno == EINTR));
3292 }
3293
3294 /* Put something in the pipe, so the event loop wakes up. */
3295 static void
3296 async_file_mark (void)
3297 {
3298 int ret;
3299
3300 async_file_flush ();
3301
3302 do
3303 ret = write (linux_event_pipe[1], "+", 1);
3304 while (ret == 0 || (ret == -1 && errno == EINTR));
3305
3306 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3307 be awakened anyway. */
3308 }
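
/* async_file_flush loops until read fails and async_file_mark
   tolerates a full pipe, which only works if linux_event_pipe is
   non-blocking.  A minimal setup sketch (the real initialization
   belongs in the target's async hook; shown here for illustration):

     if (pipe (linux_event_pipe) != 0)
       error ("creating event pipe");
     fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
     fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

   This is the classic self-pipe trick: whoever has an event to
   publish writes a byte, and the event loop polls the read end to
   wake up without racing against signal delivery.  */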
3309
3310 static ptid_t
3311 linux_wait (ptid_t ptid,
3312 struct target_waitstatus *ourstatus, int target_options)
3313 {
3314 ptid_t event_ptid;
3315
3316 /* Flush the async file first. */
3317 if (target_is_async_p ())
3318 async_file_flush ();
3319
3320 do
3321 {
3322 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3323 }
3324 while ((target_options & TARGET_WNOHANG) == 0
3325 && ptid_equal (event_ptid, null_ptid)
3326 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3327
3328 /* If at least one stop was reported, there may be more. A single
3329 SIGCHLD can signal more than one child stop. */
3330 if (target_is_async_p ()
3331 && (target_options & TARGET_WNOHANG) != 0
3332 && !ptid_equal (event_ptid, null_ptid))
3333 async_file_mark ();
3334
3335 return event_ptid;
3336 }
3337
3338 /* Send a signal to an LWP. */
3339
3340 static int
3341 kill_lwp (unsigned long lwpid, int signo)
3342 {
/* Use tkill, if possible, in case we are using NPTL threads.  If tkill
   fails with ENOSYS, we are not on an NPTL kernel and should use kill.  */
3345
3346 #ifdef __NR_tkill
3347 {
3348 static int tkill_failed;
3349
3350 if (!tkill_failed)
3351 {
3352 int ret;
3353
3354 errno = 0;
3355 ret = syscall (__NR_tkill, lwpid, signo);
3356 if (errno != ENOSYS)
3357 return ret;
3358 tkill_failed = 1;
3359 }
3360 }
3361 #endif
3362
3363 return kill (lwpid, signo);
3364 }
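
/* The distinction matters because kill sends a process-directed
   signal: the kernel is free to deliver it to any thread in the
   group, which is useless for stopping one particular LWP.  tkill
   (and its successor tgkill) direct the signal at a single kernel
   task.  Typical use, as in send_sigstop below:

     kill_lwp (lwpid_of (thread), SIGSTOP);

   On old LinuxThreads systems every thread was a separate process,
   so plain kill was the right tool; that is why it remains the
   fallback when __NR_tkill is unavailable or fails with ENOSYS.  */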
3365
3366 void
3367 linux_stop_lwp (struct lwp_info *lwp)
3368 {
3369 send_sigstop (lwp);
3370 }
3371
3372 static void
3373 send_sigstop (struct lwp_info *lwp)
3374 {
3375 int pid;
3376
3377 pid = lwpid_of (get_lwp_thread (lwp));
3378
/* If we already have a pending stop signal for this LWP, don't
   send another.  */
3381 if (lwp->stop_expected)
3382 {
3383 if (debug_threads)
3384 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3385
3386 return;
3387 }
3388
3389 if (debug_threads)
3390 debug_printf ("Sending sigstop to lwp %d\n", pid);
3391
3392 lwp->stop_expected = 1;
3393 kill_lwp (pid, SIGSTOP);
3394 }
3395
3396 static int
3397 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3398 {
3399 struct thread_info *thread = (struct thread_info *) entry;
3400 struct lwp_info *lwp = get_thread_lwp (thread);
3401
3402 /* Ignore EXCEPT. */
3403 if (lwp == except)
3404 return 0;
3405
3406 if (lwp->stopped)
3407 return 0;
3408
3409 send_sigstop (lwp);
3410 return 0;
3411 }
3412
3413 /* Increment the suspend count of an LWP, and stop it, if not stopped
3414 yet. */
3415 static int
3416 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3417 void *except)
3418 {
3419 struct thread_info *thread = (struct thread_info *) entry;
3420 struct lwp_info *lwp = get_thread_lwp (thread);
3421
3422 /* Ignore EXCEPT. */
3423 if (lwp == except)
3424 return 0;
3425
3426 lwp->suspended++;
3427
3428 return send_sigstop_callback (entry, except);
3429 }
3430
3431 static void
3432 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3433 {
3434 /* It's dead, really. */
3435 lwp->dead = 1;
3436
3437 /* Store the exit status for later. */
3438 lwp->status_pending_p = 1;
3439 lwp->status_pending = wstat;
3440
3441 /* Prevent trying to stop it. */
3442 lwp->stopped = 1;
3443
3444 /* No further stops are expected from a dead lwp. */
3445 lwp->stop_expected = 0;
3446 }
3447
3448 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3449
3450 static void
3451 wait_for_sigstop (void)
3452 {
3453 struct thread_info *saved_thread;
3454 ptid_t saved_tid;
3455 int wstat;
3456 int ret;
3457
3458 saved_thread = current_thread;
3459 if (saved_thread != NULL)
3460 saved_tid = saved_thread->entry.id;
3461 else
3462 saved_tid = null_ptid; /* avoid bogus unused warning */
3463
3464 if (debug_threads)
3465 debug_printf ("wait_for_sigstop: pulling events\n");
3466
3467 /* Passing NULL_PTID as filter indicates we want all events to be
3468 left pending. Eventually this returns when there are no
3469 unwaited-for children left. */
3470 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3471 &wstat, __WALL);
3472 gdb_assert (ret == -1);
3473
3474 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3475 current_thread = saved_thread;
3476 else
3477 {
3478 if (debug_threads)
3479 debug_printf ("Previously current thread died.\n");
3480
3481 if (non_stop)
3482 {
3483 /* We can't change the current inferior behind GDB's back,
3484 otherwise, a subsequent command may apply to the wrong
3485 process. */
3486 current_thread = NULL;
3487 }
3488 else
3489 {
3490 /* Set a valid thread as current. */
3491 set_desired_thread (0);
3492 }
3493 }
3494 }
3495
/* Returns true if LWP ENTRY is stopped in a jump pad and we can't
   move it out, because we need to report the stop event to GDB.  For
   example, if the user puts a breakpoint in the jump pad, it's
   because she wants to debug it.  */
3500
3501 static int
3502 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3503 {
3504 struct thread_info *thread = (struct thread_info *) entry;
3505 struct lwp_info *lwp = get_thread_lwp (thread);
3506
3507 gdb_assert (lwp->suspended == 0);
3508 gdb_assert (lwp->stopped);
3509
3510 /* Allow debugging the jump pad, gdb_collect, etc.. */
3511 return (supports_fast_tracepoints ()
3512 && agent_loaded_p ()
3513 && (gdb_breakpoint_here (lwp->stop_pc)
3514 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3515 || thread->last_resume_kind == resume_step)
3516 && linux_fast_tracepoint_collecting (lwp, NULL));
3517 }
3518
3519 static void
3520 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3521 {
3522 struct thread_info *thread = (struct thread_info *) entry;
3523 struct lwp_info *lwp = get_thread_lwp (thread);
3524 int *wstat;
3525
3526 gdb_assert (lwp->suspended == 0);
3527 gdb_assert (lwp->stopped);
3528
3529 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3530
3531 /* Allow debugging the jump pad, gdb_collect, etc. */
3532 if (!gdb_breakpoint_here (lwp->stop_pc)
3533 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3534 && thread->last_resume_kind != resume_step
3535 && maybe_move_out_of_jump_pad (lwp, wstat))
3536 {
3537 if (debug_threads)
3538 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3539 lwpid_of (thread));
3540
3541 if (wstat)
3542 {
3543 lwp->status_pending_p = 0;
3544 enqueue_one_deferred_signal (lwp, wstat);
3545
3546 if (debug_threads)
3547 debug_printf ("Signal %d for LWP %ld deferred "
3548 "(in jump pad)\n",
3549 WSTOPSIG (*wstat), lwpid_of (thread));
3550 }
3551
3552 linux_resume_one_lwp (lwp, 0, 0, NULL);
3553 }
3554 else
3555 lwp->suspended++;
3556 }
3557
3558 static int
3559 lwp_running (struct inferior_list_entry *entry, void *data)
3560 {
3561 struct thread_info *thread = (struct thread_info *) entry;
3562 struct lwp_info *lwp = get_thread_lwp (thread);
3563
3564 if (lwp->dead)
3565 return 0;
3566 if (lwp->stopped)
3567 return 0;
3568 return 1;
3569 }
3570
3571 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3572 If SUSPEND, then also increase the suspend count of every LWP,
3573 except EXCEPT. */
3574
3575 static void
3576 stop_all_lwps (int suspend, struct lwp_info *except)
3577 {
3578 /* Should not be called recursively. */
3579 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3580
3581 if (debug_threads)
3582 {
3583 debug_enter ();
3584 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3585 suspend ? "stop-and-suspend" : "stop",
3586 except != NULL
3587 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3588 : "none");
3589 }
3590
3591 stopping_threads = (suspend
3592 ? STOPPING_AND_SUSPENDING_THREADS
3593 : STOPPING_THREADS);
3594
3595 if (suspend)
3596 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3597 else
3598 find_inferior (&all_threads, send_sigstop_callback, except);
3599 wait_for_sigstop ();
3600 stopping_threads = NOT_STOPPING_THREADS;
3601
3602 if (debug_threads)
3603 {
3604 debug_printf ("stop_all_lwps done, setting stopping_threads "
3605 "back to !stopping\n");
3606 debug_exit ();
3607 }
3608 }
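
/* stop_all_lwps pairs with unstop_all_lwps, and the SUSPEND argument
   of the two calls must match.  The canonical idiom, used above when
   deleting a breakpoint that other threads might be running through:

     stop_all_lwps (1, event_child);
     delete_breakpoint (event_child->exit_jump_pad_bkpt);
     unstop_all_lwps (1, event_child);

   Suspending, rather than merely stopping, protects the paused
   threads from being re-resumed by unrelated code in the meantime,
   since the proceed/unstop paths skip LWPs with a non-zero suspend
   count.  */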
3609
3610 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3611 SIGNAL is nonzero, give it that signal. */
3612
3613 static void
3614 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3615 int step, int signal, siginfo_t *info)
3616 {
3617 struct thread_info *thread = get_lwp_thread (lwp);
3618 struct thread_info *saved_thread;
3619 int fast_tp_collecting;
3620
3621 if (lwp->stopped == 0)
3622 return;
3623
3624 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3625
3626 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3627
3628 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3629 user used the "jump" command, or "set $pc = foo"). */
3630 if (lwp->stop_pc != get_pc (lwp))
3631 {
3632 /* Collecting 'while-stepping' actions doesn't make sense
3633 anymore. */
3634 release_while_stepping_state_list (thread);
3635 }
3636
3637 /* If we have pending signals or status, and a new signal, enqueue the
3638 signal. Also enqueue the signal if we are waiting to reinsert a
3639 breakpoint; it will be picked up again below. */
3640 if (signal != 0
3641 && (lwp->status_pending_p
3642 || lwp->pending_signals != NULL
3643 || lwp->bp_reinsert != 0
3644 || fast_tp_collecting))
3645 {
3646 struct pending_signals *p_sig;
3647 p_sig = xmalloc (sizeof (*p_sig));
3648 p_sig->prev = lwp->pending_signals;
3649 p_sig->signal = signal;
3650 if (info == NULL)
3651 memset (&p_sig->info, 0, sizeof (siginfo_t));
3652 else
3653 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3654 lwp->pending_signals = p_sig;
3655 }
3656
3657 if (lwp->status_pending_p)
3658 {
3659 if (debug_threads)
3660 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3661 " has pending status\n",
3662 lwpid_of (thread), step ? "step" : "continue", signal,
3663 lwp->stop_expected ? "expected" : "not expected");
3664 return;
3665 }
3666
3667 saved_thread = current_thread;
3668 current_thread = thread;
3669
3670 if (debug_threads)
3671 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3672 lwpid_of (thread), step ? "step" : "continue", signal,
3673 lwp->stop_expected ? "expected" : "not expected");
3674
3675 /* This bit needs some thinking about. If we get a signal that
3676 we must report while a single-step reinsert is still pending,
3677 we often end up resuming the thread. It might be better to
3678 (ew) allow a stack of pending events; then we could be sure that
3679 the reinsert happened right away and not lose any signals.
3680
3681 Making this stack would also shrink the window in which breakpoints are
3682 uninserted (see comment in linux_wait_for_lwp) but not enough for
3683 complete correctness, so it won't solve that problem. It may be
3684 worthwhile just to solve this one, however. */
3685 if (lwp->bp_reinsert != 0)
3686 {
3687 if (debug_threads)
3688 debug_printf (" pending reinsert at 0x%s\n",
3689 paddress (lwp->bp_reinsert));
3690
3691 if (can_hardware_single_step ())
3692 {
3693 if (fast_tp_collecting == 0)
3694 {
3695 if (step == 0)
3696 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3697 if (lwp->suspended)
3698 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3699 lwp->suspended);
3700 }
3701
3702 step = 1;
3703 }
3704
3705 /* Postpone any pending signal. It was enqueued above. */
3706 signal = 0;
3707 }
3708
3709 if (fast_tp_collecting == 1)
3710 {
3711 if (debug_threads)
3712 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3713 " (exit-jump-pad-bkpt)\n",
3714 lwpid_of (thread));
3715
3716 /* Postpone any pending signal. It was enqueued above. */
3717 signal = 0;
3718 }
3719 else if (fast_tp_collecting == 2)
3720 {
3721 if (debug_threads)
3722 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3723 " single-stepping\n",
3724 lwpid_of (thread));
3725
3726 if (can_hardware_single_step ())
3727 step = 1;
3728 else
3729 {
3730 internal_error (__FILE__, __LINE__,
3731 "moving out of jump pad single-stepping"
3732 " not implemented on this target");
3733 }
3734
3735 /* Postpone any pending signal. It was enqueued above. */
3736 signal = 0;
3737 }
3738
3739 /* If we have while-stepping actions in this thread, set it stepping.
3740 If we have a signal to deliver, it may or may not be set to
3741 SIG_IGN; we don't know. Assume so, and allow collecting
3742 while-stepping into a signal handler. A possible smart thing to
3743 do would be to set an internal breakpoint at the signal return
3744 address, continue, and carry on catching this while-stepping
3745 action only when that breakpoint is hit. A future
3746 enhancement. */
3747 if (thread->while_stepping != NULL
3748 && can_hardware_single_step ())
3749 {
3750 if (debug_threads)
3751 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3752 lwpid_of (thread));
3753 step = 1;
3754 }
3755
3756 if (the_low_target.get_pc != NULL)
3757 {
3758 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3759
3760 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3761
3762 if (debug_threads)
3763 {
3764 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3765 (long) lwp->stop_pc);
3766 }
3767 }
3768
3769 /* If we have pending signals, consume one unless we are trying to
3770 reinsert a breakpoint or we're trying to finish a fast tracepoint
3771 collect. */
3772 if (lwp->pending_signals != NULL
3773 && lwp->bp_reinsert == 0
3774 && fast_tp_collecting == 0)
3775 {
3776 struct pending_signals **p_sig;
3777
3778 p_sig = &lwp->pending_signals;
3779 while ((*p_sig)->prev != NULL)
3780 p_sig = &(*p_sig)->prev;
3781
3782 signal = (*p_sig)->signal;
3783 if ((*p_sig)->info.si_signo != 0)
3784 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3785 &(*p_sig)->info);
3786
3787 free (*p_sig);
3788 *p_sig = NULL;
3789 }
3790
3791 if (the_low_target.prepare_to_resume != NULL)
3792 the_low_target.prepare_to_resume (lwp);
3793
3794 regcache_invalidate_thread (thread);
3795 errno = 0;
3796 lwp->stepping = step;
3797 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3798 (PTRACE_TYPE_ARG3) 0,
3799 /* Coerce to a uintptr_t first to avoid potential gcc warning
3800 of coercing an 8 byte integer to a 4 byte pointer. */
3801 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3802
3803 current_thread = saved_thread;
3804 if (errno)
3805 perror_with_name ("resuming thread");
3806
3807 /* Successfully resumed. Clear state that no longer makes sense,
3808 and mark the LWP as running. Must not do this before resuming
3809 otherwise if that fails other code will be confused. E.g., we'd
3810 later try to stop the LWP and hang forever waiting for a stop
3811 status. Note that we must not throw after this is cleared,
3812 otherwise handle_zombie_lwp_error would get confused. */
3813 lwp->stopped = 0;
3814 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3815 }
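
/* For reference, a minimal stand-alone sketch of the queue discipline
   used by the pending signal list above: enqueuing pushes a new entry
   at the head through the `prev' link, and the consumer walks to the
   tail before dequeuing, so signals are delivered in FIFO order.  The
   `sig_node' type and `dequeue_oldest_sketch' name are illustrative
   only, not part of gdbserver; like the caller above, the sketch
   assumes the list is non-empty.  */

struct sig_node
{
  int signal;
  struct sig_node *prev;
};

static int
dequeue_oldest_sketch (struct sig_node **head)
{
  struct sig_node **p = head;
  int sig;

  /* Walk from the newest entry (the head) to the oldest (the tail).  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);		/* Matches the xmalloc in the enqueue path.  */
  *p = NULL;
  return sig;
}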
3816
3817 /* Called when we try to resume a stopped LWP and that errors out. If
3818 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
3819 or about to become one), discard the error, clear any pending status
3820 the LWP may have, and return true (we'll collect the exit status
3821 soon enough). Otherwise, return false. */
3822
3823 static int
3824 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3825 {
3826 struct thread_info *thread = get_lwp_thread (lp);
3827
3828 /* If we get an error after resuming the LWP successfully, we'd
3829 confuse !T state for the LWP being gone. */
3830 gdb_assert (lp->stopped);
3831
3832 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3833 because even if ptrace failed with ESRCH, the tracee may be "not
3834 yet fully dead", but already refusing ptrace requests. In that
3835 case the tracee has 'R (Running)' state for a little bit
3836 (observed in Linux 3.18). See also the note on ESRCH in the
3837 ptrace(2) man page. Instead, check whether the LWP has any state
3838 other than ptrace-stopped. */
3839
3840 /* Don't assume anything if /proc/PID/status can't be read. */
3841 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3842 {
3843 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3844 lp->status_pending_p = 0;
3845 return 1;
3846 }
3847 return 0;
3848 }
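
/* For illustration, a bare-bones version of the /proc probe that the
   check above relies on: read the "State:" line of /proc/PID/status
   and report whether the LWP is in ptrace-stop ("t (tracing stop)",
   or "T (tracing stop)" on older kernels).  The real logic lives in
   linux_proc_pid_is_trace_stopped_nowarn; this sketch has none of its
   error reporting, and its name is hypothetical.  */

static int
pid_is_trace_stopped_sketch (pid_t pid)
{
  char path[64], line[128];
  FILE *f;
  int stopped = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;		/* Can't tell; assume nothing.  */

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	stopped = (strstr (line, "tracing stop") != NULL);
	break;
      }

  fclose (f);
  return stopped;
}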
3849
3850 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3851 disappears while we try to resume it. */
3852
3853 static void
3854 linux_resume_one_lwp (struct lwp_info *lwp,
3855 int step, int signal, siginfo_t *info)
3856 {
3857 TRY
3858 {
3859 linux_resume_one_lwp_throw (lwp, step, signal, info);
3860 }
3861 CATCH (ex, RETURN_MASK_ERROR)
3862 {
3863 if (!check_ptrace_stopped_lwp_gone (lwp))
3864 throw_exception (ex);
3865 }
3866 END_CATCH
3867 }
3868
3869 struct thread_resume_array
3870 {
3871 struct thread_resume *resume;
3872 size_t n;
3873 };
3874
3875 /* This function is called once per thread via find_inferior.
3876 ARG is a pointer to a thread_resume_array struct.
3877 We look up the thread specified by ENTRY in ARG, and mark the thread
3878 with a pointer to the appropriate resume request.
3879
3880 This algorithm is O(threads * resume elements), but the number of
3881 resume elements is small (and will remain small at least until GDB
3882 supports thread suspension). */
3883
3884 static int
3885 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3886 {
3887 struct thread_info *thread = (struct thread_info *) entry;
3888 struct lwp_info *lwp = get_thread_lwp (thread);
3889 int ndx;
3890 struct thread_resume_array *r;
3891
3892 r = arg;
3893
3894 for (ndx = 0; ndx < r->n; ndx++)
3895 {
3896 ptid_t ptid = r->resume[ndx].thread;
3897 if (ptid_equal (ptid, minus_one_ptid)
3898 || ptid_equal (ptid, entry->id)
3899 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3900 of PID'. */
3901 || (ptid_get_pid (ptid) == pid_of (thread)
3902 && (ptid_is_pid (ptid)
3903 || ptid_get_lwp (ptid) == -1)))
3904 {
3905 if (r->resume[ndx].kind == resume_stop
3906 && thread->last_resume_kind == resume_stop)
3907 {
3908 if (debug_threads)
3909 debug_printf ("already %s LWP %ld at GDB's request\n",
3910 (thread->last_status.kind
3911 == TARGET_WAITKIND_STOPPED)
3912 ? "stopped"
3913 : "stopping",
3914 lwpid_of (thread));
3915
3916 continue;
3917 }
3918
3919 lwp->resume = &r->resume[ndx];
3920 thread->last_resume_kind = lwp->resume->kind;
3921
3922 lwp->step_range_start = lwp->resume->step_range_start;
3923 lwp->step_range_end = lwp->resume->step_range_end;
3924
3925 /* If we had a deferred signal to report, dequeue one now.
3926 This can happen if LWP gets more than one signal while
3927 trying to get out of a jump pad. */
3928 if (lwp->stopped
3929 && !lwp->status_pending_p
3930 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3931 {
3932 lwp->status_pending_p = 1;
3933
3934 if (debug_threads)
3935 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3936 "leaving status pending.\n",
3937 WSTOPSIG (lwp->status_pending),
3938 lwpid_of (thread));
3939 }
3940
3941 return 0;
3942 }
3943 }
3944
3945 /* No resume action for this thread. */
3946 lwp->resume = NULL;
3947
3948 return 0;
3949 }
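
/* The matching above accepts three spellings of "these threads": the
   wildcard minus_one_ptid, an exact thread id, and the process-wide
   forms 'pPID' and 'pPID.-1'.  A compact restatement of that
   predicate, under a hypothetical name, using the same ptid accessors
   as the loop above:  */

static int
resume_request_matches_sketch (ptid_t req, struct thread_info *thread)
{
  return (ptid_equal (req, minus_one_ptid)
	  || ptid_equal (req, thread->entry.id)
	  || (ptid_get_pid (req) == pid_of (thread)
	      && (ptid_is_pid (req)
		  || ptid_get_lwp (req) == -1)));
}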
3950
3951 /* find_inferior callback for linux_resume.
3952 Set *FLAG_P if this lwp has an interesting status pending. */
3953
3954 static int
3955 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3956 {
3957 struct thread_info *thread = (struct thread_info *) entry;
3958 struct lwp_info *lwp = get_thread_lwp (thread);
3959
3960 /* LWPs which will not be resumed are not interesting, because
3961 we might not wait for them next time through linux_wait. */
3962 if (lwp->resume == NULL)
3963 return 0;
3964
3965 if (thread_still_has_status_pending_p (thread))
3966 * (int *) flag_p = 1;
3967
3968 return 0;
3969 }
3970
3971 /* Return 1 if this lwp that GDB wants running is stopped at an
3972 internal breakpoint that we need to step over. It assumes that any
3973 required STOP_PC adjustment has already been propagated to the
3974 inferior's regcache. */
3975
3976 static int
3977 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3978 {
3979 struct thread_info *thread = (struct thread_info *) entry;
3980 struct lwp_info *lwp = get_thread_lwp (thread);
3981 struct thread_info *saved_thread;
3982 CORE_ADDR pc;
3983
3984 /* LWPs which will not be resumed are not interesting, because we
3985 might not wait for them next time through linux_wait. */
3986
3987 if (!lwp->stopped)
3988 {
3989 if (debug_threads)
3990 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3991 lwpid_of (thread));
3992 return 0;
3993 }
3994
3995 if (thread->last_resume_kind == resume_stop)
3996 {
3997 if (debug_threads)
3998 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3999 " stopped\n",
4000 lwpid_of (thread));
4001 return 0;
4002 }
4003
4004 gdb_assert (lwp->suspended >= 0);
4005
4006 if (lwp->suspended)
4007 {
4008 if (debug_threads)
4009 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4010 lwpid_of (thread));
4011 return 0;
4012 }
4013
4014 if (!lwp->need_step_over)
4015 {
4016 if (debug_threads)
4017 debug_printf ("Need step over [LWP %ld]? No flag set; checking PC anyway\n", lwpid_of (thread));
4018 }
4019
4020 if (lwp->status_pending_p)
4021 {
4022 if (debug_threads)
4023 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4024 " status.\n",
4025 lwpid_of (thread));
4026 return 0;
4027 }
4028
4029 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4030 or we have. */
4031 pc = get_pc (lwp);
4032
4033 /* If the PC has changed since we stopped, then don't do anything,
4034 and let the breakpoint/tracepoint be hit. This happens if, for
4035 instance, GDB handled the decr_pc_after_break subtraction itself,
4036 GDB is OOL stepping this thread, or the user has issued a "jump"
4037 command, or poked the thread's registers herself. */
4038 if (pc != lwp->stop_pc)
4039 {
4040 if (debug_threads)
4041 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4042 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4043 lwpid_of (thread),
4044 paddress (lwp->stop_pc), paddress (pc));
4045
4046 lwp->need_step_over = 0;
4047 return 0;
4048 }
4049
4050 saved_thread = current_thread;
4051 current_thread = thread;
4052
4053 /* We can only step over breakpoints we know about. */
4054 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4055 {
4056 /* Don't step over a breakpoint that GDB expects to hit
4057 though. If the condition is being evaluated on the target's side
4058 and it evaluates to false, step over this breakpoint as well. */
4059 if (gdb_breakpoint_here (pc)
4060 && gdb_condition_true_at_breakpoint (pc)
4061 && gdb_no_commands_at_breakpoint (pc))
4062 {
4063 if (debug_threads)
4064 debug_printf ("Need step over [LWP %ld]? yes, but found"
4065 " GDB breakpoint at 0x%s; skipping step over\n",
4066 lwpid_of (thread), paddress (pc));
4067
4068 current_thread = saved_thread;
4069 return 0;
4070 }
4071 else
4072 {
4073 if (debug_threads)
4074 debug_printf ("Need step over [LWP %ld]? yes, "
4075 "found breakpoint at 0x%s\n",
4076 lwpid_of (thread), paddress (pc));
4077
4078 /* We've found an lwp that needs stepping over --- return 1 so
4079 that find_inferior stops looking. */
4080 current_thread = saved_thread;
4081
4082 /* If the step over is cancelled, this is set again. */
4083 lwp->need_step_over = 0;
4084 return 1;
4085 }
4086 }
4087
4088 current_thread = saved_thread;
4089
4090 if (debug_threads)
4091 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4092 " at 0x%s\n",
4093 lwpid_of (thread), paddress (pc));
4094
4095 return 0;
4096 }
4097
4098 /* Start a step-over operation on LWP. When LWP stopped at a
4099 breakpoint, to make progress, we need to move the breakpoint out
4100 of the way. If we let other threads run while we do that, they may
4101 pass by the breakpoint location and miss hitting it. To avoid
4102 that, a step-over momentarily stops all threads while LWP is
4103 single-stepped with the breakpoint temporarily uninserted from
4104 the inferior. When the single-step finishes, we reinsert the
4105 breakpoint, and let all threads that are supposed to be running
4106 run again.
4107
4108 On targets that don't support hardware single-step, we don't
4109 currently support full software single-stepping. Instead, we only
4110 support stepping over the thread event breakpoint, by asking the
4111 low target where to place a reinsert breakpoint. Since this
4112 routine assumes the breakpoint being stepped over is a thread event
4113 breakpoint, it usually assumes the return address of the current
4114 function is a good enough place to set the reinsert breakpoint. */
4115
4116 static int
4117 start_step_over (struct lwp_info *lwp)
4118 {
4119 struct thread_info *thread = get_lwp_thread (lwp);
4120 struct thread_info *saved_thread;
4121 CORE_ADDR pc;
4122 int step;
4123
4124 if (debug_threads)
4125 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4126 lwpid_of (thread));
4127
4128 stop_all_lwps (1, lwp);
4129 gdb_assert (lwp->suspended == 0);
4130
4131 if (debug_threads)
4132 debug_printf ("Done stopping all threads for step-over.\n");
4133
4134 /* Note, we should always reach here with an already adjusted PC,
4135 either by GDB (if we're resuming due to GDB's request), or by our
4136 caller, if we just finished handling an internal breakpoint GDB
4137 shouldn't care about. */
4138 pc = get_pc (lwp);
4139
4140 saved_thread = current_thread;
4141 current_thread = thread;
4142
4143 lwp->bp_reinsert = pc;
4144 uninsert_breakpoints_at (pc);
4145 uninsert_fast_tracepoint_jumps_at (pc);
4146
4147 if (can_hardware_single_step ())
4148 {
4149 step = 1;
4150 }
4151 else
4152 {
4153 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4154 set_reinsert_breakpoint (raddr);
4155 step = 0;
4156 }
4157
4158 current_thread = saved_thread;
4159
4160 linux_resume_one_lwp (lwp, step, 0, NULL);
4161
4162 /* Require next event from this LWP. */
4163 step_over_bkpt = thread->entry.id;
4164 return 1;
4165 }
4166
4167 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4168 start_step_over, if still there, and delete any reinsert
4169 breakpoints we've set, on targets without hardware single-step. */
4170
4171 static int
4172 finish_step_over (struct lwp_info *lwp)
4173 {
4174 if (lwp->bp_reinsert != 0)
4175 {
4176 if (debug_threads)
4177 debug_printf ("Finished step over.\n");
4178
4179 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4180 may be no breakpoint to reinsert there by now. */
4181 reinsert_breakpoints_at (lwp->bp_reinsert);
4182 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4183
4184 lwp->bp_reinsert = 0;
4185
4186 /* Delete any software-single-step reinsert breakpoints. No
4187 longer needed. We don't have to worry about other threads
4188 hitting this trap, and later not being able to explain it,
4189 because we were stepping over a breakpoint, and we hold all
4190 threads but LWP stopped while doing that. */
4191 if (!can_hardware_single_step ())
4192 delete_reinsert_breakpoints ();
4193
4194 step_over_bkpt = null_ptid;
4195 return 1;
4196 }
4197 else
4198 return 0;
4199 }
4200
4201 /* This function is called once per thread. We check the thread's resume
4202 request, which will tell us whether to resume, step, or leave the thread
4203 stopped; and what signal, if any, it should be sent.
4204
4205 For threads which we aren't explicitly told otherwise, we preserve
4206 the stepping flag; this is used for stepping over gdbserver-placed
4207 breakpoints.
4208
4209 If pending_flags was set in any thread, we queue any needed
4210 signals, since we won't actually resume. We already have a pending
4211 event to report, so we don't need to preserve any step requests;
4212 they should be re-issued if necessary. */
4213
4214 static int
4215 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4216 {
4217 struct thread_info *thread = (struct thread_info *) entry;
4218 struct lwp_info *lwp = get_thread_lwp (thread);
4219 int step;
4220 int leave_all_stopped = * (int *) arg;
4221 int leave_pending;
4222
4223 if (lwp->resume == NULL)
4224 return 0;
4225
4226 if (lwp->resume->kind == resume_stop)
4227 {
4228 if (debug_threads)
4229 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4230
4231 if (!lwp->stopped)
4232 {
4233 if (debug_threads)
4234 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4235
4236 /* Stop the thread, and wait for the event asynchronously,
4237 through the event loop. */
4238 send_sigstop (lwp);
4239 }
4240 else
4241 {
4242 if (debug_threads)
4243 debug_printf ("already stopped LWP %ld\n",
4244 lwpid_of (thread));
4245
4246 /* The LWP may have been stopped by an internal event that
4247 was not meant to be reported back to GDB (e.g., a gdbserver
4248 breakpoint), so we should be reporting a stop event in
4249 this case too. */
4250
4251 /* If the thread already has a pending SIGSTOP, this is a
4252 no-op. Otherwise, something later will presumably resume
4253 the thread and this will cause it to cancel any pending
4254 operation, due to last_resume_kind == resume_stop. If
4255 the thread already has a pending status to report, we
4256 will still report it the next time we wait - see
4257 status_pending_p_callback. */
4258
4259 /* If we already have a pending signal to report, then
4260 there's no need to queue a SIGSTOP, as this means we're
4261 midway through moving the LWP out of the jumppad, and we
4262 will report the pending signal as soon as that is
4263 finished. */
4264 if (lwp->pending_signals_to_report == NULL)
4265 send_sigstop (lwp);
4266 }
4267
4268 /* For stop requests, we're done. */
4269 lwp->resume = NULL;
4270 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4271 return 0;
4272 }
4273
4274 /* If this thread which is about to be resumed has a pending status,
4275 then don't resume any threads - we can just report the pending
4276 status. Make sure to queue any signals that would otherwise be
4277 sent. In all-stop mode, we base this decision on whether *any*
4278 thread has a pending status. If there's a thread that needs the
4279 step-over-breakpoint dance, then don't resume any other thread
4280 but that particular one. */
4281 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4282
4283 if (!leave_pending)
4284 {
4285 if (debug_threads)
4286 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4287
4288 step = (lwp->resume->kind == resume_step);
4289 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4290 }
4291 else
4292 {
4293 if (debug_threads)
4294 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4295
4296 /* If we have a new signal, enqueue the signal. */
4297 if (lwp->resume->sig != 0)
4298 {
4299 struct pending_signals *p_sig;
4300 p_sig = xmalloc (sizeof (*p_sig));
4301 p_sig->prev = lwp->pending_signals;
4302 p_sig->signal = lwp->resume->sig;
4303 memset (&p_sig->info, 0, sizeof (siginfo_t));
4304
4305 /* If this is the same signal we were previously stopped by,
4306 make sure to queue its siginfo. We can ignore the return
4307 value of ptrace; if it fails, we'll skip
4308 PTRACE_SETSIGINFO. */
4309 if (WIFSTOPPED (lwp->last_status)
4310 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4311 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4312 &p_sig->info);
4313
4314 lwp->pending_signals = p_sig;
4315 }
4316 }
4317
4318 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4319 lwp->resume = NULL;
4320 return 0;
4321 }
4322
4323 static void
4324 linux_resume (struct thread_resume *resume_info, size_t n)
4325 {
4326 struct thread_resume_array array = { resume_info, n };
4327 struct thread_info *need_step_over = NULL;
4328 int any_pending;
4329 int leave_all_stopped;
4330
4331 if (debug_threads)
4332 {
4333 debug_enter ();
4334 debug_printf ("linux_resume:\n");
4335 }
4336
4337 find_inferior (&all_threads, linux_set_resume_request, &array);
4338
4339 /* If there is a thread which would otherwise be resumed, which has
4340 a pending status, then don't resume any threads - we can just
4341 report the pending status. Make sure to queue any signals that
4342 would otherwise be sent. In non-stop mode, we'll apply this
4343 logic to each thread individually. We consume all pending events
4344 before considering whether to start a step-over (in all-stop). */
4345 any_pending = 0;
4346 if (!non_stop)
4347 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4348
4349 /* If there is a thread which would otherwise be resumed, which is
4350 stopped at a breakpoint that needs stepping over, then don't
4351 resume any threads - have it step over the breakpoint with all
4352 other threads stopped, then resume all threads again. Make sure
4353 to queue any signals that would otherwise be delivered or
4354 queued. */
4355 if (!any_pending && supports_breakpoints ())
4356 need_step_over
4357 = (struct thread_info *) find_inferior (&all_threads,
4358 need_step_over_p, NULL);
4359
4360 leave_all_stopped = (need_step_over != NULL || any_pending);
4361
4362 if (debug_threads)
4363 {
4364 if (need_step_over != NULL)
4365 debug_printf ("Not resuming all, need step over\n");
4366 else if (any_pending)
4367 debug_printf ("Not resuming, all-stop and found "
4368 "an LWP with pending status\n");
4369 else
4370 debug_printf ("Resuming, no pending status or step over needed\n");
4371 }
4372
4373 /* Even if we're leaving threads stopped, queue all signals we'd
4374 otherwise deliver. */
4375 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4376
4377 if (need_step_over)
4378 start_step_over (get_thread_lwp (need_step_over));
4379
4380 if (debug_threads)
4381 {
4382 debug_printf ("linux_resume done\n");
4383 debug_exit ();
4384 }
4385 }
4386
4387 /* This function is called once per thread. We check the thread's
4388 last resume request, which will tell us whether to resume, step, or
4389 leave the thread stopped. Any signal the client requested to be
4390 delivered has already been enqueued at this point.
4391
4392 If any thread that GDB wants running is stopped at an internal
4393 breakpoint that needs stepping over, we start a step-over operation
4394 on that particular thread, and leave all others stopped. */
4395
4396 static int
4397 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4398 {
4399 struct thread_info *thread = (struct thread_info *) entry;
4400 struct lwp_info *lwp = get_thread_lwp (thread);
4401 int step;
4402
4403 if (lwp == except)
4404 return 0;
4405
4406 if (debug_threads)
4407 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4408
4409 if (!lwp->stopped)
4410 {
4411 if (debug_threads)
4412 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4413 return 0;
4414 }
4415
4416 if (thread->last_resume_kind == resume_stop
4417 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4418 {
4419 if (debug_threads)
4420 debug_printf (" client wants LWP %ld to remain stopped\n",
4421 lwpid_of (thread));
4422 return 0;
4423 }
4424
4425 if (lwp->status_pending_p)
4426 {
4427 if (debug_threads)
4428 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4429 lwpid_of (thread));
4430 return 0;
4431 }
4432
4433 gdb_assert (lwp->suspended >= 0);
4434
4435 if (lwp->suspended)
4436 {
4437 if (debug_threads)
4438 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4439 return 0;
4440 }
4441
4442 if (thread->last_resume_kind == resume_stop
4443 && lwp->pending_signals_to_report == NULL
4444 && lwp->collecting_fast_tracepoint == 0)
4445 {
4446 /* We haven't reported this LWP as stopped yet (otherwise, the
4447 last_status.kind check above would catch it, and we wouldn't
4448 reach here). This LWP may have been momentarily paused by a
4449 stop_all_lwps call while handling, for example, another LWP's
4450 step-over. In that case, the pending expected SIGSTOP signal
4451 that was queued at vCont;t handling time will have already
4452 been consumed by wait_for_sigstop, and so we need to requeue
4453 another one here. Note that if the LWP already has a SIGSTOP
4454 pending, this is a no-op. */
4455
4456 if (debug_threads)
4457 debug_printf ("Client wants LWP %ld to stop. "
4458 "Making sure it has a SIGSTOP pending\n",
4459 lwpid_of (thread));
4460
4461 send_sigstop (lwp);
4462 }
4463
4464 step = thread->last_resume_kind == resume_step;
4465 linux_resume_one_lwp (lwp, step, 0, NULL);
4466 return 0;
4467 }
4468
4469 static int
4470 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4471 {
4472 struct thread_info *thread = (struct thread_info *) entry;
4473 struct lwp_info *lwp = get_thread_lwp (thread);
4474
4475 if (lwp == except)
4476 return 0;
4477
4478 lwp->suspended--;
4479 gdb_assert (lwp->suspended >= 0);
4480
4481 return proceed_one_lwp (entry, except);
4482 }
4483
4484 /* When we finish a step-over, set threads running again. If there's
4485 another thread that may need a step-over, now's the time to start
4486 it. Eventually, we'll move all threads past their breakpoints. */
4487
4488 static void
4489 proceed_all_lwps (void)
4490 {
4491 struct thread_info *need_step_over;
4492
4493 /* If there is a thread which would otherwise be resumed, which is
4494 stopped at a breakpoint that needs stepping over, then don't
4495 resume any threads - have it step over the breakpoint with all
4496 other threads stopped, then resume all threads again. */
4497
4498 if (supports_breakpoints ())
4499 {
4500 need_step_over
4501 = (struct thread_info *) find_inferior (&all_threads,
4502 need_step_over_p, NULL);
4503
4504 if (need_step_over != NULL)
4505 {
4506 if (debug_threads)
4507 debug_printf ("proceed_all_lwps: found "
4508 "thread %ld needing a step-over\n",
4509 lwpid_of (need_step_over));
4510
4511 start_step_over (get_thread_lwp (need_step_over));
4512 return;
4513 }
4514 }
4515
4516 if (debug_threads)
4517 debug_printf ("Proceeding, no step-over needed\n");
4518
4519 find_inferior (&all_threads, proceed_one_lwp, NULL);
4520 }
4521
4522 /* Stopped LWPs that the client wanted running, and that have no
4523 pending status, are set to run again, except for EXCEPT, if not
4524 NULL. This undoes a stop_all_lwps call. */
4525
4526 static void
4527 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4528 {
4529 if (debug_threads)
4530 {
4531 debug_enter ();
4532 if (except)
4533 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4534 lwpid_of (get_lwp_thread (except)));
4535 else
4536 debug_printf ("unstopping all lwps\n");
4537 }
4538
4539 if (unsuspend)
4540 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4541 else
4542 find_inferior (&all_threads, proceed_one_lwp, except);
4543
4544 if (debug_threads)
4545 {
4546 debug_printf ("unstop_all_lwps done\n");
4547 debug_exit ();
4548 }
4549 }
4550
4551
4552 #ifdef HAVE_LINUX_REGSETS
4553
4554 #define use_linux_regsets 1
4555
4556 /* Returns true if REGSET has been disabled. */
4557
4558 static int
4559 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4560 {
4561 return (info->disabled_regsets != NULL
4562 && info->disabled_regsets[regset - info->regsets]);
4563 }
4564
4565 /* Disable REGSET. */
4566
4567 static void
4568 disable_regset (struct regsets_info *info, struct regset_info *regset)
4569 {
4570 int dr_offset;
4571
4572 dr_offset = regset - info->regsets;
4573 if (info->disabled_regsets == NULL)
4574 info->disabled_regsets = xcalloc (1, info->num_regsets);
4575 info->disabled_regsets[dr_offset] = 1;
4576 }
4577
4578 static int
4579 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4580 struct regcache *regcache)
4581 {
4582 struct regset_info *regset;
4583 int saw_general_regs = 0;
4584 int pid;
4585 struct iovec iov;
4586
4587 pid = lwpid_of (current_thread);
4588 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4589 {
4590 void *buf, *data;
4591 int nt_type, res;
4592
4593 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4594 continue;
4595
4596 buf = xmalloc (regset->size);
4597
4598 nt_type = regset->nt_type;
4599 if (nt_type)
4600 {
4601 iov.iov_base = buf;
4602 iov.iov_len = regset->size;
4603 data = (void *) &iov;
4604 }
4605 else
4606 data = buf;
4607
4608 #ifndef __sparc__
4609 res = ptrace (regset->get_request, pid,
4610 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4611 #else
4612 res = ptrace (regset->get_request, pid, data, nt_type);
4613 #endif
4614 if (res < 0)
4615 {
4616 if (errno == EIO)
4617 {
4618 /* If we get EIO on a regset, do not try it again for
4619 this process mode. */
4620 disable_regset (regsets_info, regset);
4621 }
4622 else if (errno == ENODATA)
4623 {
4624 /* ENODATA may be returned if the regset is currently
4625 not "active". This can happen in normal operation,
4626 so suppress the warning in this case. */
4627 }
4628 else
4629 {
4630 char s[256];
4631 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4632 pid);
4633 perror (s);
4634 }
4635 }
4636 else
4637 {
4638 if (regset->type == GENERAL_REGS)
4639 saw_general_regs = 1;
4640 regset->store_function (regcache, buf);
4641 }
4642 free (buf);
4643 }
4644 if (saw_general_regs)
4645 return 0;
4646 else
4647 return 1;
4648 }
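
/* The iovec handling above follows the PTRACE_GETREGSET convention:
   for regsets with an NT_* note type, the kernel is handed a struct
   iovec and trims iov_len down to the number of bytes it actually
   wrote.  A minimal sketch of one such fetch, assuming the host
   defines PTRACE_GETREGSET and NT_PRSTATUS; `fetch_gregs_sketch' is a
   hypothetical name, not a gdbserver function.  */

static long
fetch_gregs_sketch (int pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  /* On success, iov.iov_len holds the number of bytes filled in.  */
  return ptrace (PTRACE_GETREGSET, pid,
		 (PTRACE_TYPE_ARG3) (long) NT_PRSTATUS, &iov);
}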
4649
4650 static int
4651 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4652 struct regcache *regcache)
4653 {
4654 struct regset_info *regset;
4655 int saw_general_regs = 0;
4656 int pid;
4657 struct iovec iov;
4658
4659 pid = lwpid_of (current_thread);
4660 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4661 {
4662 void *buf, *data;
4663 int nt_type, res;
4664
4665 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4666 || regset->fill_function == NULL)
4667 continue;
4668
4669 buf = xmalloc (regset->size);
4670
4671 /* First fill the buffer with the current register set contents,
4672 in case there are any items in the kernel's regset that are
4673 not in gdbserver's regcache. */
4674
4675 nt_type = regset->nt_type;
4676 if (nt_type)
4677 {
4678 iov.iov_base = buf;
4679 iov.iov_len = regset->size;
4680 data = (void *) &iov;
4681 }
4682 else
4683 data = buf;
4684
4685 #ifndef __sparc__
4686 res = ptrace (regset->get_request, pid,
4687 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4688 #else
4689 res = ptrace (regset->get_request, pid, data, nt_type);
4690 #endif
4691
4692 if (res == 0)
4693 {
4694 /* Then overlay our cached registers on that. */
4695 regset->fill_function (regcache, buf);
4696
4697 /* Only now do we write the register set. */
4698 #ifndef __sparc__
4699 res = ptrace (regset->set_request, pid,
4700 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4701 #else
4702 res = ptrace (regset->set_request, pid, data, nt_type);
4703 #endif
4704 }
4705
4706 if (res < 0)
4707 {
4708 if (errno == EIO)
4709 {
4710 /* If we get EIO on a regset, do not try it again for
4711 this process mode. */
4712 disable_regset (regsets_info, regset);
4713 }
4714 else if (errno == ESRCH)
4715 {
4716 /* At this point, ESRCH should mean the process is
4717 already gone, in which case we simply ignore attempts
4718 to change its registers. See also the related
4719 comment in linux_resume_one_lwp. */
4720 free (buf);
4721 return 0;
4722 }
4723 else
4724 {
4725 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4726 }
4727 }
4728 else if (regset->type == GENERAL_REGS)
4729 saw_general_regs = 1;
4730 free (buf);
4731 }
4732 if (saw_general_regs)
4733 return 0;
4734 else
4735 return 1;
4736 }
4737
4738 #else /* !HAVE_LINUX_REGSETS */
4739
4740 #define use_linux_regsets 0
4741 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4742 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4743
4744 #endif
4745
4746 /* Return 1 if register REGNO is supported by one of the regset ptrace
4747 calls or 0 if it has to be transferred individually. */
4748
4749 static int
4750 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4751 {
4752 unsigned char mask = 1 << (regno % 8);
4753 size_t index = regno / 8;
4754
4755 return (use_linux_regsets
4756 && (regs_info->regset_bitmap == NULL
4757 || (regs_info->regset_bitmap[index] & mask) != 0));
4758 }
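
/* Worked example of the bitmap test above: regno 10 maps to byte
   10 / 8 == 1 and mask 1 << (10 % 8) == 4, so register 10 is handled
   by regsets iff bit 2 of regset_bitmap[1] is set.  A NULL bitmap
   means every register is.  */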
4759
4760 #ifdef HAVE_LINUX_USRREGS
4761
4762 int
4763 register_addr (const struct usrregs_info *usrregs, int regnum)
4764 {
4765 int addr;
4766
4767 if (regnum < 0 || regnum >= usrregs->num_regs)
4768 error ("Invalid register number %d.", regnum);
4769
4770 addr = usrregs->regmap[regnum];
4771
4772 return addr;
4773 }
4774
4775 /* Fetch one register. */
4776 static void
4777 fetch_register (const struct usrregs_info *usrregs,
4778 struct regcache *regcache, int regno)
4779 {
4780 CORE_ADDR regaddr;
4781 int i, size;
4782 char *buf;
4783 int pid;
4784
4785 if (regno >= usrregs->num_regs)
4786 return;
4787 if ((*the_low_target.cannot_fetch_register) (regno))
4788 return;
4789
4790 regaddr = register_addr (usrregs, regno);
4791 if (regaddr == -1)
4792 return;
4793
4794 size = ((register_size (regcache->tdesc, regno)
4795 + sizeof (PTRACE_XFER_TYPE) - 1)
4796 & -sizeof (PTRACE_XFER_TYPE));
4797 buf = alloca (size);
4798
4799 pid = lwpid_of (current_thread);
4800 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4801 {
4802 errno = 0;
4803 *(PTRACE_XFER_TYPE *) (buf + i) =
4804 ptrace (PTRACE_PEEKUSER, pid,
4805 /* Coerce to a uintptr_t first to avoid potential gcc warning
4806 of coercing an 8 byte integer to a 4 byte pointer. */
4807 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4808 regaddr += sizeof (PTRACE_XFER_TYPE);
4809 if (errno != 0)
4810 error ("reading register %d: %s", regno, strerror (errno));
4811 }
4812
4813 if (the_low_target.supply_ptrace_register)
4814 the_low_target.supply_ptrace_register (regcache, regno, buf);
4815 else
4816 supply_register (regcache, regno, buf);
4817 }
4818
4819 /* Store one register. */
4820 static void
4821 store_register (const struct usrregs_info *usrregs,
4822 struct regcache *regcache, int regno)
4823 {
4824 CORE_ADDR regaddr;
4825 int i, size;
4826 char *buf;
4827 int pid;
4828
4829 if (regno >= usrregs->num_regs)
4830 return;
4831 if ((*the_low_target.cannot_store_register) (regno))
4832 return;
4833
4834 regaddr = register_addr (usrregs, regno);
4835 if (regaddr == -1)
4836 return;
4837
4838 size = ((register_size (regcache->tdesc, regno)
4839 + sizeof (PTRACE_XFER_TYPE) - 1)
4840 & -sizeof (PTRACE_XFER_TYPE));
4841 buf = alloca (size);
4842 memset (buf, 0, size);
4843
4844 if (the_low_target.collect_ptrace_register)
4845 the_low_target.collect_ptrace_register (regcache, regno, buf);
4846 else
4847 collect_register (regcache, regno, buf);
4848
4849 pid = lwpid_of (current_thread);
4850 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4851 {
4852 errno = 0;
4853 ptrace (PTRACE_POKEUSER, pid,
4854 /* Coerce to a uintptr_t first to avoid potential gcc warning
4855 about coercing an 8 byte integer to a 4 byte pointer. */
4856 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4857 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4858 if (errno != 0)
4859 {
4860 /* At this point, ESRCH should mean the process is
4861 already gone, in which case we simply ignore attempts
4862 to change its registers. See also the related
4863 comment in linux_resume_one_lwp. */
4864 if (errno == ESRCH)
4865 return;
4866
4867 if ((*the_low_target.cannot_store_register) (regno) == 0)
4868 error ("writing register %d: %s", regno, strerror (errno));
4869 }
4870 regaddr += sizeof (PTRACE_XFER_TYPE);
4871 }
4872 }
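
/* Worked example of the size rounding used by both transfer loops
   above: with 8-byte ptrace words, a 10-byte register yields
   (10 + 8 - 1) & -8 == 16 bytes, i.e. two PTRACE_PEEKUSER/POKEUSER
   round trips; a size that is already a multiple of the word size is
   left unchanged.  */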
4873
4874 /* Fetch all registers, or just one, from the child process.
4875 If REGNO is -1, do this for all registers, skipping any that are
4876 assumed to have been retrieved by regsets_fetch_inferior_registers,
4877 unless ALL is non-zero.
4878 Otherwise, REGNO specifies which register (so we can save time). */
4879 static void
4880 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4881 struct regcache *regcache, int regno, int all)
4882 {
4883 struct usrregs_info *usr = regs_info->usrregs;
4884
4885 if (regno == -1)
4886 {
4887 for (regno = 0; regno < usr->num_regs; regno++)
4888 if (all || !linux_register_in_regsets (regs_info, regno))
4889 fetch_register (usr, regcache, regno);
4890 }
4891 else
4892 fetch_register (usr, regcache, regno);
4893 }
4894
4895 /* Store our register values back into the inferior.
4896 If REGNO is -1, do this for all registers, skipping any that are
4897 assumed to have been saved by regsets_store_inferior_registers,
4898 unless ALL is non-zero.
4899 Otherwise, REGNO specifies which register (so we can save time). */
4900 static void
4901 usr_store_inferior_registers (const struct regs_info *regs_info,
4902 struct regcache *regcache, int regno, int all)
4903 {
4904 struct usrregs_info *usr = regs_info->usrregs;
4905
4906 if (regno == -1)
4907 {
4908 for (regno = 0; regno < usr->num_regs; regno++)
4909 if (all || !linux_register_in_regsets (regs_info, regno))
4910 store_register (usr, regcache, regno);
4911 }
4912 else
4913 store_register (usr, regcache, regno);
4914 }
4915
4916 #else /* !HAVE_LINUX_USRREGS */
4917
4918 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4919 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4920
4921 #endif
4922
4923
4924 void
4925 linux_fetch_registers (struct regcache *regcache, int regno)
4926 {
4927 int use_regsets;
4928 int all = 0;
4929 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4930
4931 if (regno == -1)
4932 {
4933 if (the_low_target.fetch_register != NULL
4934 && regs_info->usrregs != NULL)
4935 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4936 (*the_low_target.fetch_register) (regcache, regno);
4937
4938 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4939 if (regs_info->usrregs != NULL)
4940 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4941 }
4942 else
4943 {
4944 if (the_low_target.fetch_register != NULL
4945 && (*the_low_target.fetch_register) (regcache, regno))
4946 return;
4947
4948 use_regsets = linux_register_in_regsets (regs_info, regno);
4949 if (use_regsets)
4950 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4951 regcache);
4952 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4953 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4954 }
4955 }
4956
4957 void
4958 linux_store_registers (struct regcache *regcache, int regno)
4959 {
4960 int use_regsets;
4961 int all = 0;
4962 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4963
4964 if (regno == -1)
4965 {
4966 all = regsets_store_inferior_registers (regs_info->regsets_info,
4967 regcache);
4968 if (regs_info->usrregs != NULL)
4969 usr_store_inferior_registers (regs_info, regcache, regno, all);
4970 }
4971 else
4972 {
4973 use_regsets = linux_register_in_regsets (regs_info, regno);
4974 if (use_regsets)
4975 all = regsets_store_inferior_registers (regs_info->regsets_info,
4976 regcache);
4977 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4978 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4979 }
4980 }
4981
4982
4983 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4984 to debugger memory starting at MYADDR. Returns 0 on success, else errno. */
4985
4986 static int
4987 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4988 {
4989 int pid = lwpid_of (current_thread);
4990 register PTRACE_XFER_TYPE *buffer;
4991 register CORE_ADDR addr;
4992 register int count;
4993 char filename[64];
4994 register int i;
4995 int ret;
4996 int fd;
4997
4998 /* Try using /proc. Don't bother for one word. */
4999 if (len >= 3 * sizeof (long))
5000 {
5001 int bytes;
5002
5003 /* We could keep this file open and cache it - possibly one per
5004 thread. That requires some juggling, but is even faster. */
5005 sprintf (filename, "/proc/%d/mem", pid);
5006 fd = open (filename, O_RDONLY | O_LARGEFILE);
5007 if (fd == -1)
5008 goto no_proc;
5009
5010 /* If pread64 is available, use it. It's faster if the kernel
5011 supports it (only one syscall), and it's 64-bit safe even on
5012 32-bit platforms (for instance, SPARC debugging a SPARC64
5013 application). */
5014 #ifdef HAVE_PREAD64
5015 bytes = pread64 (fd, myaddr, len, memaddr);
5016 #else
5017 bytes = -1;
5018 if (lseek (fd, memaddr, SEEK_SET) != -1)
5019 bytes = read (fd, myaddr, len);
5020 #endif
5021
5022 close (fd);
5023 if (bytes == len)
5024 return 0;
5025
5026 /* Some data was read; we'll try to get the rest with ptrace. */
5027 if (bytes > 0)
5028 {
5029 memaddr += bytes;
5030 myaddr += bytes;
5031 len -= bytes;
5032 }
5033 }
5034
5035 no_proc:
5036 /* Round starting address down to longword boundary. */
5037 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5038 /* Round ending address up; get number of longwords that makes. */
5039 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5040 / sizeof (PTRACE_XFER_TYPE));
5041 /* Allocate buffer of that many longwords. */
5042 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5043
5044 /* Read all the longwords */
5045 errno = 0;
5046 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5047 {
5048 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5049 about coercing an 8 byte integer to a 4 byte pointer. */
5050 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5051 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5052 (PTRACE_TYPE_ARG4) 0);
5053 if (errno)
5054 break;
5055 }
5056 ret = errno;
5057
5058 /* Copy appropriate bytes out of the buffer. */
5059 if (i > 0)
5060 {
5061 i *= sizeof (PTRACE_XFER_TYPE);
5062 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5063 memcpy (myaddr,
5064 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5065 i < len ? i : len);
5066 }
5067
5068 return ret;
5069 }
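
/* The /proc path above issues a single pread64 and falls back to
   ptrace for whatever is left over.  A sketch of a loop that instead
   keeps draining short reads from /proc/PID/mem until done; EOF and
   errors still require the PTRACE_PEEKTEXT fallback.  Guarded by
   HAVE_PREAD64 like the code above; the function name is
   hypothetical.  */

#ifdef HAVE_PREAD64
static int
proc_mem_read_all_sketch (int fd, unsigned char *myaddr, int len,
			  CORE_ADDR memaddr)
{
  while (len > 0)
    {
      ssize_t n = pread64 (fd, myaddr, len, memaddr);

      if (n <= 0)
	return -1;	/* Caller falls back to ptrace.  */
      myaddr += n;
      memaddr += n;
      len -= n;
    }
  return 0;
}
#endif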
5070
5071 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5072 memory at MEMADDR. On failure (cannot write to the inferior)
5073 returns the value of errno. Always succeeds if LEN is zero. */
5074
5075 static int
5076 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5077 {
5078 register int i;
5079 /* Round starting address down to longword boundary. */
5080 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5081 /* Round ending address up; get number of longwords that makes. */
5082 register int count
5083 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5084 / sizeof (PTRACE_XFER_TYPE);
5085
5086 /* Allocate buffer of that many longwords. */
5087 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5088 alloca (count * sizeof (PTRACE_XFER_TYPE));
5089
5090 int pid = lwpid_of (current_thread);
5091
5092 if (len == 0)
5093 {
5094 /* Zero length write always succeeds. */
5095 return 0;
5096 }
5097
5098 if (debug_threads)
5099 {
5100 /* Dump up to four bytes. */
5101 unsigned int val = * (unsigned int *) myaddr;
5102 if (len == 1)
5103 val = val & 0xff;
5104 else if (len == 2)
5105 val = val & 0xffff;
5106 else if (len == 3)
5107 val = val & 0xffffff;
5108 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5109 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5110 }
5111
5112 /* Fill start and end extra bytes of buffer with existing memory data. */
5113
5114 errno = 0;
5115 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5116 about coercing an 8 byte integer to a 4 byte pointer. */
5117 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5118 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5119 (PTRACE_TYPE_ARG4) 0);
5120 if (errno)
5121 return errno;
5122
5123 if (count > 1)
5124 {
5125 errno = 0;
5126 buffer[count - 1]
5127 = ptrace (PTRACE_PEEKTEXT, pid,
5128 /* Coerce to a uintptr_t first to avoid potential gcc warning
5129 about coercing an 8 byte integer to a 4 byte pointer. */
5130 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5131 * sizeof (PTRACE_XFER_TYPE)),
5132 (PTRACE_TYPE_ARG4) 0);
5133 if (errno)
5134 return errno;
5135 }
5136
5137 /* Copy data to be written over corresponding part of buffer. */
5138
5139 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5140 myaddr, len);
5141
5142 /* Write the entire buffer. */
5143
5144 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5145 {
5146 errno = 0;
5147 ptrace (PTRACE_POKETEXT, pid,
5148 /* Coerce to a uintptr_t first to avoid potential gcc warning
5149 about coercing an 8 byte integer to a 4 byte pointer. */
5150 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5151 (PTRACE_TYPE_ARG4) buffer[i]);
5152 if (errno)
5153 return errno;
5154 }
5155
5156 return 0;
5157 }
5158
5159 static void
5160 linux_look_up_symbols (void)
5161 {
5162 #ifdef USE_THREAD_DB
5163 struct process_info *proc = current_process ();
5164
5165 if (proc->priv->thread_db != NULL)
5166 return;
5167
5168 /* If the kernel supports tracing clones, then we don't need to
5169 use the magic thread event breakpoint to learn about
5170 threads. */
5171 thread_db_init (!linux_supports_traceclone ());
5172 #endif
5173 }
5174
5175 static void
5176 linux_request_interrupt (void)
5177 {
5178 extern unsigned long signal_pid;
5179
5180 /* Send a SIGINT to the process group. This acts just like the user
5181 typed a ^C on the controlling terminal. */
5182 kill (-signal_pid, SIGINT);
5183 }
5184
5185 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5186 to debugger memory starting at MYADDR. */
5187
5188 static int
5189 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5190 {
5191 char filename[PATH_MAX];
5192 int fd, n;
5193 int pid = lwpid_of (current_thread);
5194
5195 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5196
5197 fd = open (filename, O_RDONLY);
5198 if (fd < 0)
5199 return -1;
5200
5201 if (offset != (CORE_ADDR) 0
5202 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5203 n = -1;
5204 else
5205 n = read (fd, myaddr, len);
5206
5207 close (fd);
5208
5209 return n;
5210 }
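
/* What the caller gets back is raw auxv data: an array of
   (a_type, a_val) pairs terminated by an AT_NULL entry.  A sketch of
   scanning it for one tag, assuming the inferior and gdbserver share
   a word size (a 32-bit inferior under a 64-bit gdbserver needs the
   elf32 layout instead); the function name is hypothetical.  */

static unsigned long
auxv_find_sketch (const unsigned long *auxv, size_t nwords,
		  unsigned long type)
{
  size_t i;

  for (i = 0; i + 1 < nwords; i += 2)
    {
      if (auxv[i] == 0)		/* AT_NULL terminates the vector.  */
	break;
      if (auxv[i] == type)
	return auxv[i + 1];	/* The matching a_val.  */
    }
  return 0;
}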
5211
5212 /* These breakpoint- and watchpoint-related wrapper functions simply
5213 pass on the function call if the target has registered a
5214 corresponding function. */
5215
5216 static int
5217 linux_supports_z_point_type (char z_type)
5218 {
5219 return (the_low_target.supports_z_point_type != NULL
5220 && the_low_target.supports_z_point_type (z_type));
5221 }
5222
5223 static int
5224 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5225 int size, struct raw_breakpoint *bp)
5226 {
5227 if (type == raw_bkpt_type_sw)
5228 return insert_memory_breakpoint (bp);
5229 else if (the_low_target.insert_point != NULL)
5230 return the_low_target.insert_point (type, addr, size, bp);
5231 else
5232 /* Unsupported (see target.h). */
5233 return 1;
5234 }
5235
5236 static int
5237 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5238 int size, struct raw_breakpoint *bp)
5239 {
5240 if (type == raw_bkpt_type_sw)
5241 return remove_memory_breakpoint (bp);
5242 else if (the_low_target.remove_point != NULL)
5243 return the_low_target.remove_point (type, addr, size, bp);
5244 else
5245 /* Unsupported (see target.h). */
5246 return 1;
5247 }
5248
5249 /* Implement the to_stopped_by_sw_breakpoint target_ops
5250 method. */
5251
5252 static int
5253 linux_stopped_by_sw_breakpoint (void)
5254 {
5255 struct lwp_info *lwp = get_thread_lwp (current_thread);
5256
5257 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5258 }
5259
5260 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5261 method. */
5262
5263 static int
5264 linux_supports_stopped_by_sw_breakpoint (void)
5265 {
5266 return USE_SIGTRAP_SIGINFO;
5267 }
5268
5269 /* Implement the to_stopped_by_hw_breakpoint target_ops
5270 method. */
5271
5272 static int
5273 linux_stopped_by_hw_breakpoint (void)
5274 {
5275 struct lwp_info *lwp = get_thread_lwp (current_thread);
5276
5277 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5278 }
5279
5280 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5281 method. */
5282
5283 static int
5284 linux_supports_stopped_by_hw_breakpoint (void)
5285 {
5286 return USE_SIGTRAP_SIGINFO;
5287 }
5288
5289 /* Implement the supports_conditional_breakpoints target_ops
5290 method. */
5291
5292 static int
5293 linux_supports_conditional_breakpoints (void)
5294 {
5295 /* GDBserver needs to step over the breakpoint if the condition is
5296 false. GDBserver's software single-step is too simple, so disable
5297 conditional breakpoints if the target doesn't have hardware
5298 single-step. */
5299 return can_hardware_single_step ();
5300 }
5301
5302 static int
5303 linux_stopped_by_watchpoint (void)
5304 {
5305 struct lwp_info *lwp = get_thread_lwp (current_thread);
5306
5307 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5308 }
5309
5310 static CORE_ADDR
5311 linux_stopped_data_address (void)
5312 {
5313 struct lwp_info *lwp = get_thread_lwp (current_thread);
5314
5315 return lwp->stopped_data_address;
5316 }
5317
5318 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5319 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5320 && defined(PT_TEXT_END_ADDR)
5321
5322 /* This is only used for targets that define PT_TEXT_ADDR,
5323 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5324 the target has different ways of acquiring this information, like
5325 loadmaps. */
5326
5327 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5328 to tell gdb about. */
5329
5330 static int
5331 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5332 {
5333 unsigned long text, text_end, data;
5334 int pid = lwpid_of (current_thread);
5335
5336 errno = 0;
5337
5338 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5339 (PTRACE_TYPE_ARG4) 0);
5340 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5341 (PTRACE_TYPE_ARG4) 0);
5342 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5343 (PTRACE_TYPE_ARG4) 0);
5344
5345 if (errno == 0)
5346 {
5347 /* Both text and data offsets produced at compile-time (and so
5348 used by gdb) are relative to the beginning of the program,
5349 with the data segment immediately following the text segment.
5350 However, the actual runtime layout in memory may put the data
5351 somewhere else, so when we send gdb a data base-address, we
5352 use the real data base address and subtract the compile-time
5353 data base-address from it (which is just the length of the
5354 text segment). BSS immediately follows data in both
5355 cases. */
5356 *text_p = text;
5357 *data_p = data - (text_end - text);
5358
5359 return 1;
5360 }
5361 return 0;
5362 }
5363 #endif
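
/* Worked example of the offset arithmetic above, with illustrative
   numbers: if PT_TEXT_ADDR reads back 0x10000000, PT_TEXT_END_ADDR
   0x10008000 and PT_DATA_ADDR 0x20000000, the text segment is 0x8000
   bytes long, so we report *text_p = 0x10000000 and *data_p =
   0x20000000 - 0x8000 = 0x1fff8000; gdb then adds the compile-time
   data offset (0x8000) back and arrives at the real runtime data
   address.  */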
5364
5365 static int
5366 linux_qxfer_osdata (const char *annex,
5367 unsigned char *readbuf, unsigned const char *writebuf,
5368 CORE_ADDR offset, int len)
5369 {
5370 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5371 }
5372
5373 /* Convert a native/host siginfo object into/from the siginfo in the
5374 layout of the inferior's architecture. */
5375
5376 static void
5377 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5378 {
5379 int done = 0;
5380
5381 if (the_low_target.siginfo_fixup != NULL)
5382 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5383
5384 /* If there was no callback, or the callback didn't do anything,
5385 then just do a straight memcpy. */
5386 if (!done)
5387 {
5388 if (direction == 1)
5389 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5390 else
5391 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5392 }
5393 }
5394
5395 static int
5396 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5397 unsigned const char *writebuf, CORE_ADDR offset, int len)
5398 {
5399 int pid;
5400 siginfo_t siginfo;
5401 char inf_siginfo[sizeof (siginfo_t)];
5402
5403 if (current_thread == NULL)
5404 return -1;
5405
5406 pid = lwpid_of (current_thread);
5407
5408 if (debug_threads)
5409 debug_printf ("%s siginfo for lwp %d.\n",
5410 readbuf != NULL ? "Reading" : "Writing",
5411 pid);
5412
5413 if (offset >= sizeof (siginfo))
5414 return -1;
5415
5416 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5417 return -1;
5418
5419 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5420 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5421 inferior with a 64-bit GDBSERVER should look the same as debugging it
5422 with a 32-bit GDBSERVER, we need to convert it. */
5423 siginfo_fixup (&siginfo, inf_siginfo, 0);
5424
5425 if (offset + len > sizeof (siginfo))
5426 len = sizeof (siginfo) - offset;
5427
5428 if (readbuf != NULL)
5429 memcpy (readbuf, inf_siginfo + offset, len);
5430 else
5431 {
5432 memcpy (inf_siginfo + offset, writebuf, len);
5433
5434 /* Convert back to ptrace layout before flushing it out. */
5435 siginfo_fixup (&siginfo, inf_siginfo, 1);
5436
5437 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5438 return -1;
5439 }
5440
5441 return len;
5442 }
5443
5444 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5445 it lets us notice when children change state; and it acts as the
5446 handler for the sigsuspend in my_waitpid. */
5447
5448 static void
5449 sigchld_handler (int signo)
5450 {
5451 int old_errno = errno;
5452
5453 if (debug_threads)
5454 {
5455 do
5456 {
5457 /* fprintf is not async-signal-safe, so call write
5458 directly. */
5459 if (write (2, "sigchld_handler\n",
5460 sizeof ("sigchld_handler\n") - 1) < 0)
5461 break; /* just ignore */
5462 } while (0);
5463 }
5464
5465 if (target_is_async_p ())
5466 async_file_mark (); /* trigger a linux_wait */
5467
5468 errno = old_errno;
5469 }
5470
5471 static int
5472 linux_supports_non_stop (void)
5473 {
5474 return 1;
5475 }
5476
5477 static int
5478 linux_async (int enable)
5479 {
5480 int previous = target_is_async_p ();
5481
5482 if (debug_threads)
5483 debug_printf ("linux_async (%d), previous=%d\n",
5484 enable, previous);
5485
5486 if (previous != enable)
5487 {
5488 sigset_t mask;
5489 sigemptyset (&mask);
5490 sigaddset (&mask, SIGCHLD);
5491
5492 sigprocmask (SIG_BLOCK, &mask, NULL);
5493
5494 if (enable)
5495 {
5496 if (pipe (linux_event_pipe) == -1)
5497 {
5498 linux_event_pipe[0] = -1;
5499 linux_event_pipe[1] = -1;
5500 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5501
5502 warning ("creating event pipe failed.");
5503 return previous;
5504 }
5505
5506 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5507 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5508
5509 /* Register the event loop handler. */
5510 add_file_handler (linux_event_pipe[0],
5511 handle_target_event, NULL);
5512
5513 /* Always trigger a linux_wait. */
5514 async_file_mark ();
5515 }
5516 else
5517 {
5518 delete_file_handler (linux_event_pipe[0]);
5519
5520 close (linux_event_pipe[0]);
5521 close (linux_event_pipe[1]);
5522 linux_event_pipe[0] = -1;
5523 linux_event_pipe[1] = -1;
5524 }
5525
5526 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5527 }
5528
5529 return previous;
5530 }
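
/* The event pipe managed above is the classic self-pipe trick: the
   SIGCHLD handler marks the pipe, and the event loop watches the read
   end, turning an asynchronous signal into an ordinary file event.  A
   sketch of the two halves, assuming this is essentially all that
   async_file_mark and the event-loop reader do (the real functions
   keep extra state); both names here are hypothetical.  */

static void
async_mark_sketch (int pipe_write_end)
{
  /* write is async-signal-safe; one byte is enough.  On a full
     non-blocking pipe, EAGAIN just means an event is already
     pending, so the result can be ignored.  */
  if (write (pipe_write_end, "+", 1) < 0)
    ;
}

static void
async_drain_sketch (int pipe_read_end)
{
  char buf[64];

  /* Consume everything so the event loop doesn't spin on a stale
     mark.  */
  while (read (pipe_read_end, buf, sizeof (buf)) > 0)
    continue;
}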
5531
5532 static int
5533 linux_start_non_stop (int nonstop)
5534 {
5535 /* Register or unregister from event-loop accordingly. */
5536 linux_async (nonstop);
5537
5538 if (target_is_async_p () != (nonstop != 0))
5539 return -1;
5540
5541 return 0;
5542 }
5543
5544 static int
5545 linux_supports_multi_process (void)
5546 {
5547 return 1;
5548 }
5549
5550 /* Check if fork events are supported. */
5551
5552 static int
5553 linux_supports_fork_events (void)
5554 {
5555 return linux_supports_tracefork ();
5556 }
5557
5558 /* Check if vfork events are supported. */
5559
5560 static int
5561 linux_supports_vfork_events (void)
5562 {
5563 return linux_supports_tracefork ();
5564 }
5565
5566 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5567 options for the specified lwp. */
5568
5569 static int
5570 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5571 void *args)
5572 {
5573 struct thread_info *thread = (struct thread_info *) entry;
5574 struct lwp_info *lwp = get_thread_lwp (thread);
5575
5576 if (!lwp->stopped)
5577 {
5578 /* Stop the lwp so we can modify its ptrace options. */
5579 lwp->must_set_ptrace_flags = 1;
5580 linux_stop_lwp (lwp);
5581 }
5582 else
5583 {
5584 /* Already stopped; go ahead and set the ptrace options. */
5585 struct process_info *proc = find_process_pid (pid_of (thread));
5586 int options = linux_low_ptrace_options (proc->attached);
5587
5588 linux_enable_event_reporting (lwpid_of (thread), options);
5589 lwp->must_set_ptrace_flags = 0;
5590 }
5591
5592 return 0;
5593 }
5594
5595 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5596 ptrace flags for all inferiors. This is in case the new GDB connection
5597 doesn't support the same set of events that the previous one did. */
5598
5599 static void
5600 linux_handle_new_gdb_connection (void)
5601 {
5602 pid_t pid;
5603
5604 /* Request that all the lwps reset their ptrace options. */
5605 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5606 }
5607
5608 static int
5609 linux_supports_disable_randomization (void)
5610 {
5611 #ifdef HAVE_PERSONALITY
5612 return 1;
5613 #else
5614 return 0;
5615 #endif
5616 }
5617
5618 static int
5619 linux_supports_agent (void)
5620 {
5621 return 1;
5622 }
5623
5624 static int
5625 linux_supports_range_stepping (void)
5626 {
5627 if (*the_low_target.supports_range_stepping == NULL)
5628 return 0;
5629
5630 return (*the_low_target.supports_range_stepping) ();
5631 }
5632
5633 /* Enumerate spufs IDs for process PID. */
5634 static int
5635 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5636 {
5637 int pos = 0;
5638 int written = 0;
5639 char path[128];
5640 DIR *dir;
5641 struct dirent *entry;
5642
5643 sprintf (path, "/proc/%ld/fd", pid);
5644 dir = opendir (path);
5645 if (!dir)
5646 return -1;
5647
5648 rewinddir (dir);
5649 while ((entry = readdir (dir)) != NULL)
5650 {
5651 struct stat st;
5652 struct statfs stfs;
5653 int fd;
5654
5655 fd = atoi (entry->d_name);
5656 if (!fd)
5657 continue;
5658
5659 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5660 if (stat (path, &st) != 0)
5661 continue;
5662 if (!S_ISDIR (st.st_mode))
5663 continue;
5664
5665 if (statfs (path, &stfs) != 0)
5666 continue;
5667 if (stfs.f_type != SPUFS_MAGIC)
5668 continue;
5669
5670 if (pos >= offset && pos + 4 <= offset + len)
5671 {
5672 *(unsigned int *)(buf + pos - offset) = fd;
5673 written += 4;
5674 }
5675 pos += 4;
5676 }
5677
5678 closedir (dir);
5679 return written;
5680 }
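
/* Illustration only: the filesystem-identity test that
spu_enumerate_spu_ids applies with SPUFS_MAGIC above, shown with
procfs instead. PROC_SUPER_MAGIC is procfs's documented f_type value
(see statfs(2)); the demo_* name is hypothetical. Guarded with #if 0;
extract to a standalone file to build it. */
#if 0
#include <stdio.h>
#include <sys/vfs.h>

#ifndef PROC_SUPER_MAGIC
#define PROC_SUPER_MAGIC 0x9fa0
#endif

/* Return 1 if PATH lives on procfs, else 0.  */
static int
demo_is_procfs (const char *path)
{
  struct statfs stfs;

  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == PROC_SUPER_MAGIC;
}

int
main (void)
{
  printf ("/proc: %d\n", demo_is_procfs ("/proc"));
  printf ("/tmp:  %d\n", demo_is_procfs ("/tmp"));
  return 0;
}
#endif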
5681
5682 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5683 object type, using the /proc file system. */
5684 static int
5685 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5686 unsigned const char *writebuf,
5687 CORE_ADDR offset, int len)
5688 {
5689 long pid = lwpid_of (current_thread);
5690 char buf[128];
5691 int fd = 0;
5692 int ret = 0;
5693
5694 if (!writebuf && !readbuf)
5695 return -1;
5696
5697 if (!*annex)
5698 {
5699 if (!readbuf)
5700 return -1;
5701 else
5702 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5703 }
5704
5705 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5706 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5707 if (fd <= 0)
5708 return -1;
5709
5710 if (offset != 0
5711 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5712 {
5713 close (fd);
5714 return 0;
5715 }
5716
5717 if (writebuf)
5718 ret = write (fd, writebuf, (size_t) len);
5719 else
5720 ret = read (fd, readbuf, (size_t) len);
5721
5722 close (fd);
5723 return ret;
5724 }
5725
5726 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5727 struct target_loadseg
5728 {
5729 /* Core address to which the segment is mapped. */
5730 Elf32_Addr addr;
5731 /* VMA recorded in the program header. */
5732 Elf32_Addr p_vaddr;
5733 /* Size of this segment in memory. */
5734 Elf32_Word p_memsz;
5735 };
5736
5737 # if defined PT_GETDSBT
5738 struct target_loadmap
5739 {
5740 /* Protocol version number, must be zero. */
5741 Elf32_Word version;
5742 /* Pointer to the DSBT table, its size, and the DSBT index. */
5743 unsigned *dsbt_table;
5744 unsigned dsbt_size, dsbt_index;
5745 /* Number of segments in this map. */
5746 Elf32_Word nsegs;
5747 /* The actual memory map. */
5748 struct target_loadseg segs[/*nsegs*/];
5749 };
5750 # define LINUX_LOADMAP PT_GETDSBT
5751 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5752 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5753 # else
5754 struct target_loadmap
5755 {
5756 /* Protocol version number, must be zero. */
5757 Elf32_Half version;
5758 /* Number of segments in this map. */
5759 Elf32_Half nsegs;
5760 /* The actual memory map. */
5761 struct target_loadseg segs[/*nsegs*/];
5762 };
5763 # define LINUX_LOADMAP PTRACE_GETFDPIC
5764 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5765 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5766 # endif
5767
5768 static int
5769 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5770 unsigned char *myaddr, unsigned int len)
5771 {
5772 int pid = lwpid_of (current_thread);
5773 int addr = -1;
5774 struct target_loadmap *data = NULL;
5775 unsigned int actual_length, copy_length;
5776
5777 if (strcmp (annex, "exec") == 0)
5778 addr = (int) LINUX_LOADMAP_EXEC;
5779 else if (strcmp (annex, "interp") == 0)
5780 addr = (int) LINUX_LOADMAP_INTERP;
5781 else
5782 return -1;
5783
5784 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5785 return -1;
5786
5787 if (data == NULL)
5788 return -1;
5789
5790 actual_length = sizeof (struct target_loadmap)
5791 + sizeof (struct target_loadseg) * data->nsegs;
5792
5793 if (offset < 0 || offset > actual_length)
5794 return -1;
5795
5796 copy_length = actual_length - offset < len ? actual_length - offset : len;
5797 memcpy (myaddr, (char *) data + offset, copy_length);
5798 return copy_length;
5799 }
5800 #else
5801 # define linux_read_loadmap NULL
5802 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5803
5804 static void
5805 linux_process_qsupported (const char *query)
5806 {
5807 if (the_low_target.process_qsupported != NULL)
5808 the_low_target.process_qsupported (query);
5809 }
5810
5811 static int
5812 linux_supports_tracepoints (void)
5813 {
5814 if (*the_low_target.supports_tracepoints == NULL)
5815 return 0;
5816
5817 return (*the_low_target.supports_tracepoints) ();
5818 }
5819
5820 static CORE_ADDR
5821 linux_read_pc (struct regcache *regcache)
5822 {
5823 if (the_low_target.get_pc == NULL)
5824 return 0;
5825
5826 return (*the_low_target.get_pc) (regcache);
5827 }
5828
5829 static void
5830 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5831 {
5832 gdb_assert (the_low_target.set_pc != NULL);
5833
5834 (*the_low_target.set_pc) (regcache, pc);
5835 }
5836
5837 static int
5838 linux_thread_stopped (struct thread_info *thread)
5839 {
5840 return get_thread_lwp (thread)->stopped;
5841 }
5842
5843 /* This exposes stop-all-threads functionality to other modules. */
5844
5845 static void
5846 linux_pause_all (int freeze)
5847 {
5848 stop_all_lwps (freeze, NULL);
5849 }
5850
5851 /* This exposes unstop-all-threads functionality to other gdbserver
5852 modules. */
5853
5854 static void
5855 linux_unpause_all (int unfreeze)
5856 {
5857 unstop_all_lwps (unfreeze, NULL);
5858 }
5859
5860 static int
5861 linux_prepare_to_access_memory (void)
5862 {
5863 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5864 running LWP. */
5865 if (non_stop)
5866 linux_pause_all (1);
5867 return 0;
5868 }
5869
5870 static void
5871 linux_done_accessing_memory (void)
5872 {
5873 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5874 running LWP. */
5875 if (non_stop)
5876 linux_unpause_all (1);
5877 }
5878
5879 static int
5880 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5881 CORE_ADDR collector,
5882 CORE_ADDR lockaddr,
5883 ULONGEST orig_size,
5884 CORE_ADDR *jump_entry,
5885 CORE_ADDR *trampoline,
5886 ULONGEST *trampoline_size,
5887 unsigned char *jjump_pad_insn,
5888 ULONGEST *jjump_pad_insn_size,
5889 CORE_ADDR *adjusted_insn_addr,
5890 CORE_ADDR *adjusted_insn_addr_end,
5891 char *err)
5892 {
5893 return (*the_low_target.install_fast_tracepoint_jump_pad)
5894 (tpoint, tpaddr, collector, lockaddr, orig_size,
5895 jump_entry, trampoline, trampoline_size,
5896 jjump_pad_insn, jjump_pad_insn_size,
5897 adjusted_insn_addr, adjusted_insn_addr_end,
5898 err);
5899 }
5900
5901 static struct emit_ops *
5902 linux_emit_ops (void)
5903 {
5904 if (the_low_target.emit_ops != NULL)
5905 return (*the_low_target.emit_ops) ();
5906 else
5907 return NULL;
5908 }
5909
5910 static int
5911 linux_get_min_fast_tracepoint_insn_len (void)
5912 {
5913 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5914 }
5915
5916 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5917
5918 static int
5919 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5920 CORE_ADDR *phdr_memaddr, int *num_phdr)
5921 {
5922 char filename[PATH_MAX];
5923 int fd;
5924 const int auxv_size = is_elf64
5925 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5926 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5927
5928 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5929
5930 fd = open (filename, O_RDONLY);
5931 if (fd < 0)
5932 return 1;
5933
5934 *phdr_memaddr = 0;
5935 *num_phdr = 0;
5936 while (read (fd, buf, auxv_size) == auxv_size
5937 && (*phdr_memaddr == 0 || *num_phdr == 0))
5938 {
5939 if (is_elf64)
5940 {
5941 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5942
5943 switch (aux->a_type)
5944 {
5945 case AT_PHDR:
5946 *phdr_memaddr = aux->a_un.a_val;
5947 break;
5948 case AT_PHNUM:
5949 *num_phdr = aux->a_un.a_val;
5950 break;
5951 }
5952 }
5953 else
5954 {
5955 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5956
5957 switch (aux->a_type)
5958 {
5959 case AT_PHDR:
5960 *phdr_memaddr = aux->a_un.a_val;
5961 break;
5962 case AT_PHNUM:
5963 *num_phdr = aux->a_un.a_val;
5964 break;
5965 }
5966 }
5967 }
5968
5969 close (fd);
5970
5971 if (*phdr_memaddr == 0 || *num_phdr == 0)
5972 {
5973 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5974 "phdr_memaddr = %ld, phdr_num = %d",
5975 (long) *phdr_memaddr, *num_phdr);
5976 return 2;
5977 }
5978
5979 return 0;
5980 }
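
/* Illustration only: the in-process counterpart of
get_phdr_phnum_from_proc_auxv above -- reading AT_PHDR and AT_PHNUM
from /proc/self/auxv. ElfW picks the native auxv width, so no
is_elf64 switch is needed for one's own process. Guarded with #if 0;
extract to a standalone file to build it. */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <elf.h>
#include <link.h>	/* For the ElfW width-selection macro.  */

int
main (void)
{
  ElfW(auxv_t) aux;
  int fd = open ("/proc/self/auxv", O_RDONLY);

  if (fd < 0)
    return 1;

  while (read (fd, &aux, sizeof (aux)) == sizeof (aux))
    {
      if (aux.a_type == AT_PHDR)
	printf ("AT_PHDR  = 0x%lx\n", (unsigned long) aux.a_un.a_val);
      else if (aux.a_type == AT_PHNUM)
	printf ("AT_PHNUM = %lu\n", (unsigned long) aux.a_un.a_val);
    }

  close (fd);
  return 0;
}
#endif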
5981
5982 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5983
5984 static CORE_ADDR
5985 get_dynamic (const int pid, const int is_elf64)
5986 {
5987 CORE_ADDR phdr_memaddr, relocation;
5988 int num_phdr, i;
5989 unsigned char *phdr_buf;
5990 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5991
5992 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5993 return 0;
5994
5995 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5996 phdr_buf = alloca (num_phdr * phdr_size);
5997
5998 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5999 return 0;
6000
6001 /* Compute relocation: it is expected to be 0 for "regular" executables,
6002 non-zero for PIE ones. */
6003 relocation = -1;
6004 for (i = 0; relocation == -1 && i < num_phdr; i++)
6005 if (is_elf64)
6006 {
6007 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6008
6009 if (p->p_type == PT_PHDR)
6010 relocation = phdr_memaddr - p->p_vaddr;
6011 }
6012 else
6013 {
6014 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6015
6016 if (p->p_type == PT_PHDR)
6017 relocation = phdr_memaddr - p->p_vaddr;
6018 }
6019
6020 if (relocation == -1)
6021 {
6022 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6023 all real-world executables, including PIE executables, always have
6024 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6025 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6026 provides DT_DEBUG anyway (fpc binaries are statically linked).
6027
6028 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6029
6030 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6031
6032 return 0;
6033 }
6034
6035 for (i = 0; i < num_phdr; i++)
6036 {
6037 if (is_elf64)
6038 {
6039 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6040
6041 if (p->p_type == PT_DYNAMIC)
6042 return p->p_vaddr + relocation;
6043 }
6044 else
6045 {
6046 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6047
6048 if (p->p_type == PT_DYNAMIC)
6049 return p->p_vaddr + relocation;
6050 }
6051 }
6052
6053 return 0;
6054 }
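
/* Illustration only: the in-process analogue of get_dynamic's program
header walk. dl_iterate_phdr hands each loaded object's phdrs to a
callback together with its load bias (dlpi_addr), which is the
RELOCATION the code above derives from PT_PHDR. The demo_* name is
hypothetical. Guarded with #if 0; extract to a standalone file to
build it. */
#if 0
#define _GNU_SOURCE	/* For dl_iterate_phdr; must precede includes.  */
#include <stdio.h>
#include <link.h>

static int
demo_phdr_callback (struct dl_phdr_info *info, size_t size, void *data)
{
  int i;

  for (i = 0; i < info->dlpi_phnum; i++)
    if (info->dlpi_phdr[i].p_type == PT_DYNAMIC)
      printf ("%s: PT_DYNAMIC at 0x%lx (load bias 0x%lx)\n",
	      info->dlpi_name[0] != '\0' ? info->dlpi_name : "[exe]",
	      (unsigned long) (info->dlpi_addr
			       + info->dlpi_phdr[i].p_vaddr),
	      (unsigned long) info->dlpi_addr);
  return 0;		/* Nonzero would stop the iteration.  */
}

int
main (void)
{
  dl_iterate_phdr (demo_phdr_callback, NULL);
  return 0;
}
#endif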
6055
6056 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6057 can be 0 if the inferior does not yet have the library list initialized.
6058 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6059 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6060
6061 static CORE_ADDR
6062 get_r_debug (const int pid, const int is_elf64)
6063 {
6064 CORE_ADDR dynamic_memaddr;
6065 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6066 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6067 CORE_ADDR map = -1;
6068
6069 dynamic_memaddr = get_dynamic (pid, is_elf64);
6070 if (dynamic_memaddr == 0)
6071 return map;
6072
6073 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6074 {
6075 if (is_elf64)
6076 {
6077 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6078 #ifdef DT_MIPS_RLD_MAP
6079 union
6080 {
6081 Elf64_Xword map;
6082 unsigned char buf[sizeof (Elf64_Xword)];
6083 }
6084 rld_map;
6085
6086 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6087 {
6088 if (linux_read_memory (dyn->d_un.d_val,
6089 rld_map.buf, sizeof (rld_map.buf)) == 0)
6090 return rld_map.map;
6091 else
6092 break;
6093 }
6094 #endif /* DT_MIPS_RLD_MAP */
6095
6096 if (dyn->d_tag == DT_DEBUG && map == -1)
6097 map = dyn->d_un.d_val;
6098
6099 if (dyn->d_tag == DT_NULL)
6100 break;
6101 }
6102 else
6103 {
6104 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6105 #ifdef DT_MIPS_RLD_MAP
6106 union
6107 {
6108 Elf32_Word map;
6109 unsigned char buf[sizeof (Elf32_Word)];
6110 }
6111 rld_map;
6112
6113 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6114 {
6115 if (linux_read_memory (dyn->d_un.d_val,
6116 rld_map.buf, sizeof (rld_map.buf)) == 0)
6117 return rld_map.map;
6118 else
6119 break;
6120 }
6121 #endif /* DT_MIPS_RLD_MAP */
6122
6123 if (dyn->d_tag == DT_DEBUG && map == -1)
6124 map = dyn->d_un.d_val;
6125
6126 if (dyn->d_tag == DT_NULL)
6127 break;
6128 }
6129
6130 dynamic_memaddr += dyn_size;
6131 }
6132
6133 return map;
6134 }
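
/* Illustration only: what get_r_debug does from the outside, done from
the inside -- scan our own _DYNAMIC for DT_DEBUG, which the dynamic
loader points at its struct r_debug, then walk the link_map chain the
way linux_qxfer_libraries_svr4 below does via ptrace. Assumes a
dynamically linked glibc program. Guarded with #if 0; extract to a
standalone file to build it. */
#if 0
#include <stdio.h>
#include <elf.h>
#include <link.h>

extern ElfW(Dyn) _DYNAMIC[];	/* Provided by the link editor.  */

int
main (void)
{
  ElfW(Dyn) *dyn;
  struct r_debug *rd = NULL;
  struct link_map *lm;

  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      rd = (struct r_debug *) dyn->d_un.d_ptr;

  if (rd == NULL)
    {
      printf ("no DT_DEBUG (statically linked?)\n");
      return 1;
    }

  printf ("r_version = %d\n", rd->r_version);
  for (lm = rd->r_map; lm != NULL; lm = lm->l_next)
    printf ("l_addr = 0x%lx  l_name = \"%s\"\n",
	    (unsigned long) lm->l_addr, lm->l_name ? lm->l_name : "");
  return 0;
}
#endif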
6135
6136 /* Read one pointer from MEMADDR in the inferior. */
6137
6138 static int
6139 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6140 {
6141 int ret;
6142
6143 /* Go through a union so this works on either big or little endian
6144 hosts, when the inferior's pointer size is smaller than the size
6145 of CORE_ADDR. It is assumed the inferior's endianness is the
6146 same as the superior's. */
6147 union
6148 {
6149 CORE_ADDR core_addr;
6150 unsigned int ui;
6151 unsigned char uc;
6152 } addr;
6153
6154 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6155 if (ret == 0)
6156 {
6157 if (ptr_size == sizeof (CORE_ADDR))
6158 *ptr = addr.core_addr;
6159 else if (ptr_size == sizeof (unsigned int))
6160 *ptr = addr.ui;
6161 else
6162 gdb_assert_not_reached ("unhandled pointer size");
6163 }
6164 return ret;
6165 }
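
/* Illustration only: why the union above works. The raw inferior
bytes are copied to the union's lowest address, and the value is then
read back at the width that was actually transferred, so the host
interprets the bytes at a consistent offset on both big- and
little-endian machines. Guarded with #if 0; extract to a standalone
file to build it. */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main (void)
{
  union
  {
    uint64_t core_addr;		/* Stand-in for CORE_ADDR.  */
    uint32_t ui;		/* The inferior's 4-byte pointer.  */
    unsigned char uc;		/* Copy target, as in read_one_ptr.  */
  } addr;
  uint32_t inferior_ptr = 0x12345678;

  /* Pretend these four bytes arrived via linux_read_memory.  */
  memcpy (&addr.uc, &inferior_ptr, sizeof (inferior_ptr));
  printf ("recovered: 0x%x\n", addr.ui);
  return 0;
}
#endif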
6166
6167 struct link_map_offsets
6168 {
6169 /* Offset and size of r_debug.r_version. */
6170 int r_version_offset;
6171
6172 /* Offset and size of r_debug.r_map. */
6173 int r_map_offset;
6174
6175 /* Offset to l_addr field in struct link_map. */
6176 int l_addr_offset;
6177
6178 /* Offset to l_name field in struct link_map. */
6179 int l_name_offset;
6180
6181 /* Offset to l_ld field in struct link_map. */
6182 int l_ld_offset;
6183
6184 /* Offset to l_next field in struct link_map. */
6185 int l_next_offset;
6186
6187 /* Offset to l_prev field in struct link_map. */
6188 int l_prev_offset;
6189 };
6190
6191 /* Construct qXfer:libraries-svr4:read reply. */
6192
6193 static int
6194 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6195 unsigned const char *writebuf,
6196 CORE_ADDR offset, int len)
6197 {
6198 char *document;
6199 unsigned document_len;
6200 struct process_info_private *const priv = current_process ()->priv;
6201 char filename[PATH_MAX];
6202 int pid, is_elf64;
6203
6204 static const struct link_map_offsets lmo_32bit_offsets =
6205 {
6206 0, /* r_version offset. */
6207 4, /* r_debug.r_map offset. */
6208 0, /* l_addr offset in link_map. */
6209 4, /* l_name offset in link_map. */
6210 8, /* l_ld offset in link_map. */
6211 12, /* l_next offset in link_map. */
6212 16 /* l_prev offset in link_map. */
6213 };
6214
6215 static const struct link_map_offsets lmo_64bit_offsets =
6216 {
6217 0, /* r_version offset. */
6218 8, /* r_debug.r_map offset. */
6219 0, /* l_addr offset in link_map. */
6220 8, /* l_name offset in link_map. */
6221 16, /* l_ld offset in link_map. */
6222 24, /* l_next offset in link_map. */
6223 32 /* l_prev offset in link_map. */
6224 };
6225 const struct link_map_offsets *lmo;
6226 unsigned int machine;
6227 int ptr_size;
6228 CORE_ADDR lm_addr = 0, lm_prev = 0;
6229 int allocated = 1024;
6230 char *p;
6231 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6232 int header_done = 0;
6233
6234 if (writebuf != NULL)
6235 return -2;
6236 if (readbuf == NULL)
6237 return -1;
6238
6239 pid = lwpid_of (current_thread);
6240 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6241 is_elf64 = elf_64_file_p (filename, &machine);
6242 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6243 ptr_size = is_elf64 ? 8 : 4;
6244
6245 while (annex[0] != '\0')
6246 {
6247 const char *sep;
6248 CORE_ADDR *addrp;
6249 int len;
6250
6251 sep = strchr (annex, '=');
6252 if (sep == NULL)
6253 break;
6254
6255 len = sep - annex;
6256 if (len == 5 && startswith (annex, "start"))
6257 addrp = &lm_addr;
6258 else if (len == 4 && startswith (annex, "prev"))
6259 addrp = &lm_prev;
6260 else
6261 {
6262 annex = strchr (sep, ';');
6263 if (annex == NULL)
6264 break;
6265 annex++;
6266 continue;
6267 }
6268
6269 annex = decode_address_to_semicolon (addrp, sep + 1);
6270 }
6271
6272 if (lm_addr == 0)
6273 {
6274 int r_version = 0;
6275
6276 if (priv->r_debug == 0)
6277 priv->r_debug = get_r_debug (pid, is_elf64);
6278
6279 /* We failed to find DT_DEBUG. This situation will not change
6280 for this inferior - do not retry it. Report it to GDB as
6281 E01; see the GDB solib-svr4.c side for the reasons. */
6282 if (priv->r_debug == (CORE_ADDR) -1)
6283 return -1;
6284
6285 if (priv->r_debug != 0)
6286 {
6287 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6288 (unsigned char *) &r_version,
6289 sizeof (r_version)) != 0
6290 || r_version != 1)
6291 {
6292 warning ("unexpected r_debug version %d", r_version);
6293 }
6294 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6295 &lm_addr, ptr_size) != 0)
6296 {
6297 warning ("unable to read r_map from 0x%lx",
6298 (long) priv->r_debug + lmo->r_map_offset);
6299 }
6300 }
6301 }
6302
6303 document = xmalloc (allocated);
6304 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6305 p = document + strlen (document);
6306
6307 while (lm_addr
6308 && read_one_ptr (lm_addr + lmo->l_name_offset,
6309 &l_name, ptr_size) == 0
6310 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6311 &l_addr, ptr_size) == 0
6312 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6313 &l_ld, ptr_size) == 0
6314 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6315 &l_prev, ptr_size) == 0
6316 && read_one_ptr (lm_addr + lmo->l_next_offset,
6317 &l_next, ptr_size) == 0)
6318 {
6319 unsigned char libname[PATH_MAX];
6320
6321 if (lm_prev != l_prev)
6322 {
6323 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6324 (long) lm_prev, (long) l_prev);
6325 break;
6326 }
6327
6328 /* Ignore the first entry even if it has a valid name, as the first
6329 entry corresponds to the main executable. The first entry should not
6330 be skipped if the dynamic loader was loaded late by a static executable
6331 (see the solib-svr4.c parameter ignore_first). But in such a case the
6332 main executable does not have PT_DYNAMIC present, and this function
6333 has already exited above due to a failed get_r_debug. */
6334 if (lm_prev == 0)
6335 {
6336 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6337 p = p + strlen (p);
6338 }
6339 else
6340 {
6341 /* Not checking for error because reading may stop before
6342 we've got PATH_MAX worth of characters. */
6343 libname[0] = '\0';
6344 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6345 libname[sizeof (libname) - 1] = '\0';
6346 if (libname[0] != '\0')
6347 {
6348 /* 6x the size for xml_escape_text below. */
6349 size_t len = 6 * strlen ((char *) libname);
6350 char *name;
6351
6352 if (!header_done)
6353 {
6354 /* Terminate `<library-list-svr4'. */
6355 *p++ = '>';
6356 header_done = 1;
6357 }
6358
6359 while (allocated < p - document + len + 200)
6360 {
6361 /* Expand to guarantee sufficient storage. */
6362 uintptr_t document_len = p - document;
6363
6364 document = xrealloc (document, 2 * allocated);
6365 allocated *= 2;
6366 p = document + document_len;
6367 }
6368
6369 name = xml_escape_text ((char *) libname);
6370 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6371 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6372 name, (unsigned long) lm_addr,
6373 (unsigned long) l_addr, (unsigned long) l_ld);
6374 free (name);
6375 }
6376 }
6377
6378 lm_prev = lm_addr;
6379 lm_addr = l_next;
6380 }
6381
6382 if (!header_done)
6383 {
6384 /* Empty list; terminate `<library-list-svr4'. */
6385 strcpy (p, "/>");
6386 }
6387 else
6388 strcpy (p, "</library-list-svr4>");
6389
6390 document_len = strlen (document);
6391 if (offset < document_len)
6392 document_len -= offset;
6393 else
6394 document_len = 0;
6395 if (len > document_len)
6396 len = document_len;
6397
6398 memcpy (readbuf, document + offset, len);
6399 xfree (document);
6400
6401 return len;
6402 }
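
/* Illustration only: the offset/length windowing that ends
linux_qxfer_libraries_svr4 above (linux_read_loadmap clamps the same
way). A qXfer consumer reads the generated document in pieces; each
request returns at most LEN bytes starting at OFFSET, and a zero
return signals end-of-document. The demo_* name is hypothetical.
Guarded with #if 0; extract to a standalone file to build it. */
#if 0
#include <stdio.h>
#include <string.h>

static int
demo_xfer_window (const char *document, size_t document_len,
		  char *readbuf, size_t offset, size_t len)
{
  if (offset >= document_len)
    return 0;		/* EOF.  */
  if (len > document_len - offset)
    len = document_len - offset;
  memcpy (readbuf, document + offset, len);
  return (int) len;
}

int
main (void)
{
  const char doc[] = "<library-list-svr4 version=\"1.0\"/>";
  char chunk[8 + 1];
  size_t offset = 0;
  int n;

  /* Read the document in 8-byte windows, as a remote client would.  */
  while ((n = demo_xfer_window (doc, sizeof (doc) - 1,
				chunk, offset, 8)) > 0)
    {
      chunk[n] = '\0';
      printf ("offset %2zu: \"%s\"\n", offset, chunk);
      offset += (size_t) n;
    }
  return 0;
}
#endif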
6403
6404 #ifdef HAVE_LINUX_BTRACE
6405
6406 /* See to_enable_btrace target method. */
6407
6408 static struct btrace_target_info *
6409 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6410 {
6411 struct btrace_target_info *tinfo;
6412
6413 tinfo = linux_enable_btrace (ptid, conf);
6414
6415 if (tinfo != NULL && tinfo->ptr_bits == 0)
6416 {
6417 struct thread_info *thread = find_thread_ptid (ptid);
6418 struct regcache *regcache = get_thread_regcache (thread, 0);
6419
6420 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6421 }
6422
6423 return tinfo;
6424 }
6425
6426 /* See to_disable_btrace target method. */
6427
6428 static int
6429 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6430 {
6431 enum btrace_error err;
6432
6433 err = linux_disable_btrace (tinfo);
6434 return (err == BTRACE_ERR_NONE ? 0 : -1);
6435 }
6436
6437 /* See to_read_btrace target method. */
6438
6439 static int
6440 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6441 int type)
6442 {
6443 struct btrace_data btrace;
6444 struct btrace_block *block;
6445 enum btrace_error err;
6446 int i;
6447
6448 btrace_data_init (&btrace);
6449
6450 err = linux_read_btrace (&btrace, tinfo, type);
6451 if (err != BTRACE_ERR_NONE)
6452 {
6453 if (err == BTRACE_ERR_OVERFLOW)
6454 buffer_grow_str0 (buffer, "E.Overflow.");
6455 else
6456 buffer_grow_str0 (buffer, "E.Generic Error.");
6457
6458 btrace_data_fini (&btrace);
6459 return -1;
6460 }
6461
6462 switch (btrace.format)
6463 {
6464 case BTRACE_FORMAT_NONE:
6465 buffer_grow_str0 (buffer, "E.No Trace.");
6466 break;
6467
6468 case BTRACE_FORMAT_BTS:
6469 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6470 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6471
6472 for (i = 0;
6473 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6474 i++)
6475 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6476 paddress (block->begin), paddress (block->end));
6477
6478 buffer_grow_str0 (buffer, "</btrace>\n");
6479 break;
6480
6481 default:
6482 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6483
6484 btrace_data_fini (&btrace);
6485 return -1;
6486 }
6487
6488 btrace_data_fini (&btrace);
6489 return 0;
6490 }
6491
6492 /* See to_btrace_conf target method. */
6493
6494 static int
6495 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6496 struct buffer *buffer)
6497 {
6498 const struct btrace_config *conf;
6499
6500 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6501 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6502
6503 conf = linux_btrace_conf (tinfo);
6504 if (conf != NULL)
6505 {
6506 switch (conf->format)
6507 {
6508 case BTRACE_FORMAT_NONE:
6509 break;
6510
6511 case BTRACE_FORMAT_BTS:
6512 buffer_xml_printf (buffer, "<bts");
6513 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6514 buffer_xml_printf (buffer, " />\n");
6515 break;
6516 }
6517 }
6518
6519 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6520 return 0;
6521 }
6522 #endif /* HAVE_LINUX_BTRACE */
6523
6524 /* See nat/linux-nat.h. */
6525
6526 ptid_t
6527 current_lwp_ptid (void)
6528 {
6529 return ptid_of (current_thread);
6530 }
6531
6532 static struct target_ops linux_target_ops = {
6533 linux_create_inferior,
6534 linux_attach,
6535 linux_kill,
6536 linux_detach,
6537 linux_mourn,
6538 linux_join,
6539 linux_thread_alive,
6540 linux_resume,
6541 linux_wait,
6542 linux_fetch_registers,
6543 linux_store_registers,
6544 linux_prepare_to_access_memory,
6545 linux_done_accessing_memory,
6546 linux_read_memory,
6547 linux_write_memory,
6548 linux_look_up_symbols,
6549 linux_request_interrupt,
6550 linux_read_auxv,
6551 linux_supports_z_point_type,
6552 linux_insert_point,
6553 linux_remove_point,
6554 linux_stopped_by_sw_breakpoint,
6555 linux_supports_stopped_by_sw_breakpoint,
6556 linux_stopped_by_hw_breakpoint,
6557 linux_supports_stopped_by_hw_breakpoint,
6558 linux_supports_conditional_breakpoints,
6559 linux_stopped_by_watchpoint,
6560 linux_stopped_data_address,
6561 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6562 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6563 && defined(PT_TEXT_END_ADDR)
6564 linux_read_offsets,
6565 #else
6566 NULL,
6567 #endif
6568 #ifdef USE_THREAD_DB
6569 thread_db_get_tls_address,
6570 #else
6571 NULL,
6572 #endif
6573 linux_qxfer_spu,
6574 hostio_last_error_from_errno,
6575 linux_qxfer_osdata,
6576 linux_xfer_siginfo,
6577 linux_supports_non_stop,
6578 linux_async,
6579 linux_start_non_stop,
6580 linux_supports_multi_process,
6581 linux_supports_fork_events,
6582 linux_supports_vfork_events,
6583 linux_handle_new_gdb_connection,
6584 #ifdef USE_THREAD_DB
6585 thread_db_handle_monitor_command,
6586 #else
6587 NULL,
6588 #endif
6589 linux_common_core_of_thread,
6590 linux_read_loadmap,
6591 linux_process_qsupported,
6592 linux_supports_tracepoints,
6593 linux_read_pc,
6594 linux_write_pc,
6595 linux_thread_stopped,
6596 NULL,
6597 linux_pause_all,
6598 linux_unpause_all,
6599 linux_stabilize_threads,
6600 linux_install_fast_tracepoint_jump_pad,
6601 linux_emit_ops,
6602 linux_supports_disable_randomization,
6603 linux_get_min_fast_tracepoint_insn_len,
6604 linux_qxfer_libraries_svr4,
6605 linux_supports_agent,
6606 #ifdef HAVE_LINUX_BTRACE
6607 linux_supports_btrace,
6608 linux_low_enable_btrace,
6609 linux_low_disable_btrace,
6610 linux_low_read_btrace,
6611 linux_low_btrace_conf,
6612 #else
6613 NULL,
6614 NULL,
6615 NULL,
6616 NULL,
6617 NULL,
6618 #endif
6619 linux_supports_range_stepping,
6620 linux_proc_pid_to_exec_file,
6621 };
6622
6623 static void
6624 linux_init_signals (void)
6625 {
6626 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6627 to find what the cancel signal actually is. */
6628 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6629 signal (__SIGRTMIN+1, SIG_IGN);
6630 #endif
6631 }
6632
6633 #ifdef HAVE_LINUX_REGSETS
6634 void
6635 initialize_regsets_info (struct regsets_info *info)
6636 {
6637 for (info->num_regsets = 0;
6638 info->regsets[info->num_regsets].size >= 0;
6639 info->num_regsets++)
6640 ;
6641 }
6642 #endif
6643
6644 void
6645 initialize_low (void)
6646 {
6647 struct sigaction sigchld_action;
6648 memset (&sigchld_action, 0, sizeof (sigchld_action));
6649 set_target_ops (&linux_target_ops);
6650 set_breakpoint_data (the_low_target.breakpoint,
6651 the_low_target.breakpoint_len);
6652 linux_init_signals ();
6653 linux_ptrace_init_warnings ();
6654
6655 sigchld_action.sa_handler = sigchld_handler;
6656 sigemptyset (&sigchld_action.sa_mask);
6657 sigchld_action.sa_flags = SA_RESTART;
6658 sigaction (SIGCHLD, &sigchld_action, NULL);
6659
6660 initialize_low_arch ();
6661
6662 linux_check_ptrace_features ();
6663 }