Arch-specific remote follow fork
gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
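
/* Illustrative sketch (editor's addition, not from the original
   source): W_STOPCODE composes the raw wait status that waitpid
   would report for a stop by SIG, so it round-trips with the
   standard macros:

     int wstat = W_STOPCODE (SIGTRAP);
     gdb_assert (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP);

   dequeue_one_deferred_signal below relies on exactly this to
   re-create a wait status for a signal that was deferred while the
   thread was in a jump pad.  */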

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
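
/* Illustrative sketch (editor's addition, not from the original
   source): together, the two functions above implement a claim list.
   An early stop observed for a not-yet-known child is parked with

     add_to_pid_list (&stopped_pids, child_pid, status);

   and is later claimed exactly once, when the corresponding fork or
   clone event is processed:

     int status;
     if (pull_pid_from_list (&stopped_pids, child_pid, &status))
       ... the child's initial stop was already collected ...

   handle_extended_wait below is the claiming side.  */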

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
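
/* Illustrative sketch (editor's addition, not from the original
   source): a typical use from arch-specific code, assuming PID names
   a live inferior:

     unsigned int machine;
     int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

   MACHINE receives the ELF e_machine value (e.g. EM_X86_64), which
   lets the caller choose a matching target description.  */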

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and suppress the
   trap so that higher layers never see it).  Return 0 if the event
   should be reported to the caller, 1 if it was handled here and
   should not be reported.  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;
	  child_lwp->must_set_ptrace_flags = 1;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  event_lwp->waitstatus.value.related_pid = ptid;
	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
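
/* Illustrative sketch (editor's addition, not from the original
   source): an extended event arrives as a SIGTRAP stop whose high
   status bits carry the event code, roughly

     wstat == ((SIGTRAP | (PTRACE_EVENT_FORK << 8)) << 8) | 0x7f

   so linux_ptrace_get_extended_event amounts to (wstat >> 16).
   These stops only occur once the matching PTRACE_O_TRACE* option
   (see linux_low_ptrace_options below) has been set on the LWP.  */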

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
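
/* Illustrative sketch (editor's addition, not from the original
   source): after the PTRACE_TRACEME + exec handshake above, the new
   inferior stops with SIGTRAP before executing its first
   instruction, and the server's first wait collects that stop:

     int wstat;
     if (my_waitpid (pid, &wstat, 0) == pid
	 && WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
       ... the inferior is now ptrace-stopped at its entry point ...  */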
805
806 /* Attach to an inferior process. Returns 0 on success, ERRNO on
807 error. */
808
809 int
810 linux_attach_lwp (ptid_t ptid)
811 {
812 struct lwp_info *new_lwp;
813 int lwpid = ptid_get_lwp (ptid);
814
815 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
816 != 0)
817 return errno;
818
819 new_lwp = add_lwp (ptid);
820
821 /* We need to wait for SIGSTOP before being able to make the next
822 ptrace call on this LWP. */
823 new_lwp->must_set_ptrace_flags = 1;
824
825 if (linux_proc_pid_is_stopped (lwpid))
826 {
827 if (debug_threads)
828 debug_printf ("Attached to a stopped process\n");
829
830 /* The process is definitely stopped. It is in a job control
831 stop, unless the kernel predates the TASK_STOPPED /
832 TASK_TRACED distinction, in which case it might be in a
833 ptrace stop. Make sure it is in a ptrace stop; from there we
834 can kill it, signal it, et cetera.
835
836 First make sure there is a pending SIGSTOP. Since we are
837 already attached, the process can not transition from stopped
838 to running without a PTRACE_CONT; so we know this signal will
839 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
840 probably already in the queue (unless this kernel is old
841 enough to use TASK_STOPPED for ptrace stops); but since
842 SIGSTOP is not an RT signal, it can only be queued once. */
843 kill_lwp (lwpid, SIGSTOP);
844
845 /* Finally, resume the stopped process. This will deliver the
846 SIGSTOP (or a higher priority signal, just like normal
847 PTRACE_ATTACH), which we'll catch later on. */
848 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
849 }
850
851 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
852 brings it to a halt.
853
854 There are several cases to consider here:
855
856 1) gdbserver has already attached to the process and is being notified
857 of a new thread that is being created.
858 In this case we should ignore that SIGSTOP and resume the
859 process. This is handled below by setting stop_expected = 1,
860 and the fact that add_thread sets last_resume_kind ==
861 resume_continue.
862
863 2) This is the first thread (the process thread), and we're attaching
864 to it via attach_inferior.
865 In this case we want the process thread to stop.
866 This is handled by having linux_attach set last_resume_kind ==
867 resume_stop after we return.
868
869 If the pid we are attaching to is also the tgid, we attach to and
870 stop all the existing threads. Otherwise, we attach to pid and
871 ignore any other threads in the same group as this pid.
872
873 3) GDB is connecting to gdbserver and is requesting an enumeration of all
874 existing threads.
875 In this case we want the thread to stop.
876 FIXME: This case is currently not properly handled.
877 We should wait for the SIGSTOP but don't. Things work apparently
878 because enough time passes between when we ptrace (ATTACH) and when
879 gdb makes the next ptrace call on the thread.
880
881 On the other hand, if we are currently trying to stop all threads, we
882 should treat the new thread as if we had sent it a SIGSTOP. This works
883 because we are guaranteed that the add_lwp call above added us to the
884 end of the list, and so the new thread has not yet reached
885 wait_for_sigstop (but will). */
886 new_lwp->stop_expected = 1;
887
888 return 0;
889 }
890
891 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
892 already attached. Returns true if a new LWP is found, false
893 otherwise. */
894
895 static int
896 attach_proc_task_lwp_callback (ptid_t ptid)
897 {
898 /* Is this a new thread? */
899 if (find_thread_ptid (ptid) == NULL)
900 {
901 int lwpid = ptid_get_lwp (ptid);
902 int err;
903
904 if (debug_threads)
905 debug_printf ("Found new lwp %d\n", lwpid);
906
907 err = linux_attach_lwp (ptid);
908
909 /* Be quiet if we simply raced with the thread exiting. EPERM
910 is returned if the thread's task still exists, and is marked
911 as exited or zombie, as well as other conditions, so in that
912 case, confirm the status in /proc/PID/status. */
913 if (err == ESRCH
914 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
915 {
916 if (debug_threads)
917 {
918 debug_printf ("Cannot attach to lwp %d: "
919 "thread is gone (%d: %s)\n",
920 lwpid, err, strerror (err));
921 }
922 }
923 else if (err != 0)
924 {
925 warning (_("Cannot attach to lwp %d: %s"),
926 lwpid,
927 linux_ptrace_attach_fail_reason_string (ptid, err));
928 }
929
930 return 1;
931 }
932 return 0;
933 }
934
935 /* Attach to PID. If PID is the tgid, attach to it and all
936 of its threads. */
937
938 static int
939 linux_attach (unsigned long pid)
940 {
941 ptid_t ptid = ptid_build (pid, pid, 0);
942 int err;
943
944 /* Attach to PID. We will check for other threads
945 soon. */
946 err = linux_attach_lwp (ptid);
947 if (err != 0)
948 error ("Cannot attach to process %ld: %s",
949 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
950
951 linux_add_process (pid, 1);
952
953 if (!non_stop)
954 {
955 struct thread_info *thread;
956
957 /* Don't ignore the initial SIGSTOP if we just attached to this
958 process. It will be collected by wait shortly. */
959 thread = find_thread_ptid (ptid_build (pid, pid, 0));
960 thread->last_resume_kind = resume_stop;
961 }
962
963 /* We must attach to every LWP. If /proc is mounted, use that to
964 find them now. On the one hand, the inferior may be using raw
965 clone instead of using pthreads. On the other hand, even if it
966 is using pthreads, GDB may not be connected yet (thread_db needs
967 to do symbol lookups, through qSymbol). Also, thread_db walks
968 structures in the inferior's address space to find the list of
969 threads/LWPs, and those structures may well be corrupted. Note
970 that once thread_db is loaded, we'll still use it to list threads
971 and associate pthread info with each LWP. */
972 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
973 return 0;
974 }
975
976 struct counter
977 {
978 int pid;
979 int count;
980 };
981
982 static int
983 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
984 {
985 struct counter *counter = args;
986
987 if (ptid_get_pid (entry->id) == counter->pid)
988 {
989 if (++counter->count > 1)
990 return 1;
991 }
992
993 return 0;
994 }
995
996 static int
997 last_thread_of_process_p (int pid)
998 {
999 struct counter counter = { pid , 0 };
1000
1001 return (find_inferior (&all_threads,
1002 second_thread_of_pid_p, &counter) == NULL);
1003 }
1004
1005 /* Kill LWP. */
1006
1007 static void
1008 linux_kill_one_lwp (struct lwp_info *lwp)
1009 {
1010 struct thread_info *thr = get_lwp_thread (lwp);
1011 int pid = lwpid_of (thr);
1012
1013 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1014 there is no signal context, and ptrace(PTRACE_KILL) (or
1015 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1016 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1017 alternative is to kill with SIGKILL. We only need one SIGKILL
1018 per process, not one for each thread. But since we still support
1019 linuxthreads, and we also support debugging programs using raw
1020 clone without CLONE_THREAD, we send one for each thread. For
1021 years, we used PTRACE_KILL only, so we're being a bit paranoid
1022 about some old kernels where PTRACE_KILL might work better
1023 (dubious if there are any such, but that's why it's paranoia), so
1024 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1025 everywhere. */
1026
1027 errno = 0;
1028 kill_lwp (pid, SIGKILL);
1029 if (debug_threads)
1030 {
1031 int save_errno = errno;
1032
1033 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1034 target_pid_to_str (ptid_of (thr)),
1035 save_errno ? strerror (save_errno) : "OK");
1036 }
1037
1038 errno = 0;
1039 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1040 if (debug_threads)
1041 {
1042 int save_errno = errno;
1043
1044 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1045 target_pid_to_str (ptid_of (thr)),
1046 save_errno ? strerror (save_errno) : "OK");
1047 }
1048 }
1049
1050 /* Kill LWP and wait for it to die. */
1051
1052 static void
1053 kill_wait_lwp (struct lwp_info *lwp)
1054 {
1055 struct thread_info *thr = get_lwp_thread (lwp);
1056 int pid = ptid_get_pid (ptid_of (thr));
1057 int lwpid = ptid_get_lwp (ptid_of (thr));
1058 int wstat;
1059 int res;
1060
1061 if (debug_threads)
1062 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1063
1064 do
1065 {
1066 linux_kill_one_lwp (lwp);
1067
1068 /* Make sure it died. Notes:
1069
1070 - The loop is most likely unnecessary.
1071
1072 - We don't use linux_wait_for_event as that could delete lwps
1073 while we're iterating over them. We're not interested in
1074 any pending status at this point, only in making sure all
1075 wait status on the kernel side are collected until the
1076 process is reaped.
1077
1078 - We don't use __WALL here as the __WALL emulation relies on
1079 SIGCHLD, and killing a stopped process doesn't generate
1080 one, nor an exit status.
1081 */
1082 res = my_waitpid (lwpid, &wstat, 0);
1083 if (res == -1 && errno == ECHILD)
1084 res = my_waitpid (lwpid, &wstat, __WCLONE);
1085 } while (res > 0 && WIFSTOPPED (wstat));
1086
1087 gdb_assert (res > 0);
1088 }
1089
1090 /* Callback for `find_inferior'. Kills an lwp of a given process,
1091 except the leader. */
1092
1093 static int
1094 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1095 {
1096 struct thread_info *thread = (struct thread_info *) entry;
1097 struct lwp_info *lwp = get_thread_lwp (thread);
1098 int pid = * (int *) args;
1099
1100 if (ptid_get_pid (entry->id) != pid)
1101 return 0;
1102
1103 /* We avoid killing the first thread here, because of a Linux kernel (at
1104 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1105 the children get a chance to be reaped, it will remain a zombie
1106 forever. */
1107
1108 if (lwpid_of (thread) == pid)
1109 {
1110 if (debug_threads)
1111 debug_printf ("lkop: is last of process %s\n",
1112 target_pid_to_str (entry->id));
1113 return 0;
1114 }
1115
1116 kill_wait_lwp (lwp);
1117 return 0;
1118 }
1119
1120 static int
1121 linux_kill (int pid)
1122 {
1123 struct process_info *process;
1124 struct lwp_info *lwp;
1125
1126 process = find_process_pid (pid);
1127 if (process == NULL)
1128 return -1;
1129
1130 /* If we're killing a running inferior, make sure it is stopped
1131 first, as PTRACE_KILL will not work otherwise. */
1132 stop_all_lwps (0, NULL);
1133
1134 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1135
1136 /* See the comment in linux_kill_one_lwp. We did not kill the first
1137 thread in the list, so do so now. */
1138 lwp = find_lwp_pid (pid_to_ptid (pid));
1139
1140 if (lwp == NULL)
1141 {
1142 if (debug_threads)
1143 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1144 pid);
1145 }
1146 else
1147 kill_wait_lwp (lwp);
1148
1149 the_target->mourn (process);
1150
1151 /* Since we presently can only stop all lwps of all processes, we
1152 need to unstop lwps of other processes. */
1153 unstop_all_lwps (0, NULL);
1154 return 0;
1155 }
1156
1157 /* Get pending signal of THREAD, for detaching purposes. This is the
1158 signal the thread last stopped for, which we need to deliver to the
1159 thread when detaching, otherwise, it'd be suppressed/lost. */
1160
1161 static int
1162 get_detach_signal (struct thread_info *thread)
1163 {
1164 enum gdb_signal signo = GDB_SIGNAL_0;
1165 int status;
1166 struct lwp_info *lp = get_thread_lwp (thread);
1167
1168 if (lp->status_pending_p)
1169 status = lp->status_pending;
1170 else
1171 {
1172 /* If the thread had been suspended by gdbserver, and it stopped
1173 cleanly, then it'll have stopped with SIGSTOP. But we don't
1174 want to deliver that SIGSTOP. */
1175 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1176 || thread->last_status.value.sig == GDB_SIGNAL_0)
1177 return 0;
1178
1179 /* Otherwise, we may need to deliver the signal we
1180 intercepted. */
1181 status = lp->last_status;
1182 }
1183
1184 if (!WIFSTOPPED (status))
1185 {
1186 if (debug_threads)
1187 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1188 target_pid_to_str (ptid_of (thread)));
1189 return 0;
1190 }
1191
1192 /* Extended wait statuses aren't real SIGTRAPs. */
1193 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1194 {
1195 if (debug_threads)
1196 debug_printf ("GPS: lwp %s had stopped with extended "
1197 "status: no pending signal\n",
1198 target_pid_to_str (ptid_of (thread)));
1199 return 0;
1200 }
1201
1202 signo = gdb_signal_from_host (WSTOPSIG (status));
1203
1204 if (program_signals_p && !program_signals[signo])
1205 {
1206 if (debug_threads)
1207 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1208 target_pid_to_str (ptid_of (thread)),
1209 gdb_signal_to_string (signo));
1210 return 0;
1211 }
1212 else if (!program_signals_p
1213 /* If we have no way to know which signals GDB does not
1214 want to have passed to the program, assume
1215 SIGTRAP/SIGINT, which is GDB's default. */
1216 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1217 {
1218 if (debug_threads)
1219 debug_printf ("GPS: lwp %s had signal %s, "
1220 "but we don't know if we should pass it. "
1221 "Default to not.\n",
1222 target_pid_to_str (ptid_of (thread)),
1223 gdb_signal_to_string (signo));
1224 return 0;
1225 }
1226 else
1227 {
1228 if (debug_threads)
1229 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1230 target_pid_to_str (ptid_of (thread)),
1231 gdb_signal_to_string (signo));
1232
1233 return WSTOPSIG (status);
1234 }
1235 }
1236
1237 static int
1238 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1239 {
1240 struct thread_info *thread = (struct thread_info *) entry;
1241 struct lwp_info *lwp = get_thread_lwp (thread);
1242 int pid = * (int *) args;
1243 int sig;
1244
1245 if (ptid_get_pid (entry->id) != pid)
1246 return 0;
1247
1248 /* If there is a pending SIGSTOP, get rid of it. */
1249 if (lwp->stop_expected)
1250 {
1251 if (debug_threads)
1252 debug_printf ("Sending SIGCONT to %s\n",
1253 target_pid_to_str (ptid_of (thread)));
1254
1255 kill_lwp (lwpid_of (thread), SIGCONT);
1256 lwp->stop_expected = 0;
1257 }
1258
1259 /* Flush any pending changes to the process's registers. */
1260 regcache_invalidate_thread (thread);
1261
1262 /* Pass on any pending signal for this thread. */
1263 sig = get_detach_signal (thread);
1264
1265 /* Finally, let it resume. */
1266 if (the_low_target.prepare_to_resume != NULL)
1267 the_low_target.prepare_to_resume (lwp);
1268 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1269 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1270 error (_("Can't detach %s: %s"),
1271 target_pid_to_str (ptid_of (thread)),
1272 strerror (errno));
1273
1274 delete_lwp (lwp);
1275 return 0;
1276 }
1277
1278 static int
1279 linux_detach (int pid)
1280 {
1281 struct process_info *process;
1282
1283 process = find_process_pid (pid);
1284 if (process == NULL)
1285 return -1;
1286
1287 /* Stop all threads before detaching. First, ptrace requires that
1288 the thread is stopped to sucessfully detach. Second, thread_db
1289 may need to uninstall thread event breakpoints from memory, which
1290 only works with a stopped process anyway. */
1291 stop_all_lwps (0, NULL);
1292
1293 #ifdef USE_THREAD_DB
1294 thread_db_detach (process);
1295 #endif
1296
1297 /* Stabilize threads (move out of jump pads). */
1298 stabilize_threads ();
1299
1300 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1301
1302 the_target->mourn (process);
1303
1304 /* Since we presently can only stop all lwps of all processes, we
1305 need to unstop lwps of other processes. */
1306 unstop_all_lwps (0, NULL);
1307 return 0;
1308 }
1309
1310 /* Remove all LWPs that belong to process PROC from the lwp list. */
1311
1312 static int
1313 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1314 {
1315 struct thread_info *thread = (struct thread_info *) entry;
1316 struct lwp_info *lwp = get_thread_lwp (thread);
1317 struct process_info *process = proc;
1318
1319 if (pid_of (thread) == pid_of (process))
1320 delete_lwp (lwp);
1321
1322 return 0;
1323 }
1324
1325 static void
1326 linux_mourn (struct process_info *process)
1327 {
1328 struct process_info_private *priv;
1329
1330 #ifdef USE_THREAD_DB
1331 thread_db_mourn (process);
1332 #endif
1333
1334 find_inferior (&all_threads, delete_lwp_callback, process);
1335
1336 /* Freeing all private data. */
1337 priv = process->priv;
1338 free (priv->arch_private);
1339 free (priv);
1340 process->priv = NULL;
1341
1342 remove_process (process);
1343 }
1344
1345 static void
1346 linux_join (int pid)
1347 {
1348 int status, ret;
1349
1350 do {
1351 ret = my_waitpid (pid, &status, 0);
1352 if (WIFEXITED (status) || WIFSIGNALED (status))
1353 break;
1354 } while (ret != -1 || errno != ECHILD);
1355 }
1356
1357 /* Return nonzero if the given thread is still alive. */
1358 static int
1359 linux_thread_alive (ptid_t ptid)
1360 {
1361 struct lwp_info *lwp = find_lwp_pid (ptid);
1362
1363 /* We assume we always know if a thread exits. If a whole process
1364 exited but we still haven't been able to report it to GDB, we'll
1365 hold on to the last lwp of the dead process. */
1366 if (lwp != NULL)
1367 return !lwp->dead;
1368 else
1369 return 0;
1370 }
1371
1372 /* Return 1 if this lwp still has an interesting status pending. If
1373 not (e.g., it had stopped for a breakpoint that is gone), return
1374 false. */
1375
1376 static int
1377 thread_still_has_status_pending_p (struct thread_info *thread)
1378 {
1379 struct lwp_info *lp = get_thread_lwp (thread);
1380
1381 if (!lp->status_pending_p)
1382 return 0;
1383
1384 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1385 report any status pending the LWP may have. */
1386 if (thread->last_resume_kind == resume_stop
1387 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1388 return 0;
1389
1390 if (thread->last_resume_kind != resume_stop
1391 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1392 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1393 {
1394 struct thread_info *saved_thread;
1395 CORE_ADDR pc;
1396 int discard = 0;
1397
1398 gdb_assert (lp->last_status != 0);
1399
1400 pc = get_pc (lp);
1401
1402 saved_thread = current_thread;
1403 current_thread = thread;
1404
1405 if (pc != lp->stop_pc)
1406 {
1407 if (debug_threads)
1408 debug_printf ("PC of %ld changed\n",
1409 lwpid_of (thread));
1410 discard = 1;
1411 }
1412
1413 #if !USE_SIGTRAP_SIGINFO
1414 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1415 && !(*the_low_target.breakpoint_at) (pc))
1416 {
1417 if (debug_threads)
1418 debug_printf ("previous SW breakpoint of %ld gone\n",
1419 lwpid_of (thread));
1420 discard = 1;
1421 }
1422 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1423 && !hardware_breakpoint_inserted_here (pc))
1424 {
1425 if (debug_threads)
1426 debug_printf ("previous HW breakpoint of %ld gone\n",
1427 lwpid_of (thread));
1428 discard = 1;
1429 }
1430 #endif
1431
1432 current_thread = saved_thread;
1433
1434 if (discard)
1435 {
1436 if (debug_threads)
1437 debug_printf ("discarding pending breakpoint status\n");
1438 lp->status_pending_p = 0;
1439 return 0;
1440 }
1441 }
1442
1443 return 1;
1444 }
1445
1446 /* Return 1 if this lwp has an interesting status pending. */
1447 static int
1448 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1449 {
1450 struct thread_info *thread = (struct thread_info *) entry;
1451 struct lwp_info *lp = get_thread_lwp (thread);
1452 ptid_t ptid = * (ptid_t *) arg;
1453
1454 /* Check if we're only interested in events from a specific process
1455 or a specific LWP. */
1456 if (!ptid_match (ptid_of (thread), ptid))
1457 return 0;
1458
1459 if (lp->status_pending_p
1460 && !thread_still_has_status_pending_p (thread))
1461 {
1462 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1463 return 0;
1464 }
1465
1466 return lp->status_pending_p;
1467 }
1468
1469 static int
1470 same_lwp (struct inferior_list_entry *entry, void *data)
1471 {
1472 ptid_t ptid = *(ptid_t *) data;
1473 int lwp;
1474
1475 if (ptid_get_lwp (ptid) != 0)
1476 lwp = ptid_get_lwp (ptid);
1477 else
1478 lwp = ptid_get_pid (ptid);
1479
1480 if (ptid_get_lwp (entry->id) == lwp)
1481 return 1;
1482
1483 return 0;
1484 }
1485
1486 struct lwp_info *
1487 find_lwp_pid (ptid_t ptid)
1488 {
1489 struct inferior_list_entry *thread
1490 = find_inferior (&all_threads, same_lwp, &ptid);
1491
1492 if (thread == NULL)
1493 return NULL;
1494
1495 return get_thread_lwp ((struct thread_info *) thread);
1496 }
1497
1498 /* Return the number of known LWPs in the tgid given by PID. */
1499
1500 static int
1501 num_lwps (int pid)
1502 {
1503 struct inferior_list_entry *inf, *tmp;
1504 int count = 0;
1505
1506 ALL_INFERIORS (&all_threads, inf, tmp)
1507 {
1508 if (ptid_get_pid (inf->id) == pid)
1509 count++;
1510 }
1511
1512 return count;
1513 }
1514
1515 /* The arguments passed to iterate_over_lwps. */
1516
1517 struct iterate_over_lwps_args
1518 {
1519 /* The FILTER argument passed to iterate_over_lwps. */
1520 ptid_t filter;
1521
1522 /* The CALLBACK argument passed to iterate_over_lwps. */
1523 iterate_over_lwps_ftype *callback;
1524
1525 /* The DATA argument passed to iterate_over_lwps. */
1526 void *data;
1527 };
1528
1529 /* Callback for find_inferior used by iterate_over_lwps to filter
1530 calls to the callback supplied to that function. Returning a
1531 nonzero value causes find_inferiors to stop iterating and return
1532 the current inferior_list_entry. Returning zero indicates that
1533 find_inferiors should continue iterating. */
1534
1535 static int
1536 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1537 {
1538 struct iterate_over_lwps_args *args
1539 = (struct iterate_over_lwps_args *) args_p;
1540
1541 if (ptid_match (entry->id, args->filter))
1542 {
1543 struct thread_info *thr = (struct thread_info *) entry;
1544 struct lwp_info *lwp = get_thread_lwp (thr);
1545
1546 return (*args->callback) (lwp, args->data);
1547 }
1548
1549 return 0;
1550 }
1551
1552 /* See nat/linux-nat.h. */
1553
1554 struct lwp_info *
1555 iterate_over_lwps (ptid_t filter,
1556 iterate_over_lwps_ftype callback,
1557 void *data)
1558 {
1559 struct iterate_over_lwps_args args = {filter, callback, data};
1560 struct inferior_list_entry *entry;
1561
1562 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1563 if (entry == NULL)
1564 return NULL;
1565
1566 return get_thread_lwp ((struct thread_info *) entry);
1567 }
1568
1569 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1570 their exits until all other threads in the group have exited. */
1571
1572 static void
1573 check_zombie_leaders (void)
1574 {
1575 struct process_info *proc, *tmp;
1576
1577 ALL_PROCESSES (proc, tmp)
1578 {
1579 pid_t leader_pid = pid_of (proc);
1580 struct lwp_info *leader_lp;
1581
1582 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1583
1584 if (debug_threads)
1585 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1586 "num_lwps=%d, zombie=%d\n",
1587 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1588 linux_proc_pid_is_zombie (leader_pid));
1589
1590 if (leader_lp != NULL
1591 /* Check if there are other threads in the group, as we may
1592 have raced with the inferior simply exiting. */
1593 && !last_thread_of_process_p (leader_pid)
1594 && linux_proc_pid_is_zombie (leader_pid))
1595 {
1596 /* A leader zombie can mean one of two things:
1597
1598 - It exited, and there's an exit status pending
1599 available, or only the leader exited (not the whole
1600 program). In the latter case, we can't waitpid the
1601 leader's exit status until all other threads are gone.
1602
1603 - There are 3 or more threads in the group, and a thread
1604 other than the leader exec'd. On an exec, the Linux
1605 kernel destroys all other threads (except the execing
1606 one) in the thread group, and resets the execing thread's
1607 tid to the tgid. No exit notification is sent for the
1608 execing thread -- from the ptracer's perspective, it
1609 appears as though the execing thread just vanishes.
1610 Until we reap all other threads except the leader and the
1611 execing thread, the leader will be zombie, and the
1612 execing thread will be in `D (disc sleep)'. As soon as
1613 all other threads are reaped, the execing thread changes
1614 it's tid to the tgid, and the previous (zombie) leader
1615 vanishes, giving place to the "new" leader. We could try
1616 distinguishing the exit and exec cases, by waiting once
1617 more, and seeing if something comes out, but it doesn't
1618 sound useful. The previous leader _does_ go away, and
1619 we'll re-add the new one once we see the exec event
1620 (which is just the same as what would happen if the
1621 previous leader did exit voluntarily before some other
1622 thread execs). */
1623
1624 if (debug_threads)
1625 fprintf (stderr,
1626 "CZL: Thread group leader %d zombie "
1627 "(it exited, or another thread execd).\n",
1628 leader_pid);
1629
1630 delete_lwp (leader_lp);
1631 }
1632 }
1633 }
1634
1635 /* Callback for `find_inferior'. Returns the first LWP that is not
1636 stopped. ARG is a PTID filter. */
1637
1638 static int
1639 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1640 {
1641 struct thread_info *thr = (struct thread_info *) entry;
1642 struct lwp_info *lwp;
1643 ptid_t filter = *(ptid_t *) arg;
1644
1645 if (!ptid_match (ptid_of (thr), filter))
1646 return 0;
1647
1648 lwp = get_thread_lwp (thr);
1649 if (!lwp->stopped)
1650 return 1;
1651
1652 return 0;
1653 }
1654
1655 /* This function should only be called if the LWP got a SIGTRAP.
1656
1657 Handle any tracepoint steps or hits. Return true if a tracepoint
1658 event was handled, 0 otherwise. */
1659
1660 static int
1661 handle_tracepoints (struct lwp_info *lwp)
1662 {
1663 struct thread_info *tinfo = get_lwp_thread (lwp);
1664 int tpoint_related_event = 0;
1665
1666 gdb_assert (lwp->suspended == 0);
1667
1668 /* If this tracepoint hit causes a tracing stop, we'll immediately
1669 uninsert tracepoints. To do this, we temporarily pause all
1670 threads, unpatch away, and then unpause threads. We need to make
1671 sure the unpausing doesn't resume LWP too. */
1672 lwp->suspended++;
1673
1674 /* And we need to be sure that any all-threads-stopping doesn't try
1675 to move threads out of the jump pads, as it could deadlock the
1676 inferior (LWP could be in the jump pad, maybe even holding the
1677 lock.) */
1678
1679 /* Do any necessary step collect actions. */
1680 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1681
1682 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1683
1684 /* See if we just hit a tracepoint and do its main collect
1685 actions. */
1686 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1687
1688 lwp->suspended--;
1689
1690 gdb_assert (lwp->suspended == 0);
1691 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1692
1693 if (tpoint_related_event)
1694 {
1695 if (debug_threads)
1696 debug_printf ("got a tracepoint event\n");
1697 return 1;
1698 }
1699
1700 return 0;
1701 }
1702
1703 /* Convenience wrapper. Returns true if LWP is presently collecting a
1704 fast tracepoint. */
1705
1706 static int
1707 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1708 struct fast_tpoint_collect_status *status)
1709 {
1710 CORE_ADDR thread_area;
1711 struct thread_info *thread = get_lwp_thread (lwp);
1712
1713 if (the_low_target.get_thread_area == NULL)
1714 return 0;
1715
1716 /* Get the thread area address. This is used to recognize which
1717 thread is which when tracing with the in-process agent library.
1718 We don't read anything from the address, and treat it as opaque;
1719 it's the address itself that we assume is unique per-thread. */
1720 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1721 return 0;
1722
1723 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1724 }
1725
1726 /* The reason we resume in the caller, is because we want to be able
1727 to pass lwp->status_pending as WSTAT, and we need to clear
1728 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1729 refuses to resume. */
1730
1731 static int
1732 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1733 {
1734 struct thread_info *saved_thread;
1735
1736 saved_thread = current_thread;
1737 current_thread = get_lwp_thread (lwp);
1738
1739 if ((wstat == NULL
1740 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1741 && supports_fast_tracepoints ()
1742 && agent_loaded_p ())
1743 {
1744 struct fast_tpoint_collect_status status;
1745 int r;
1746
1747 if (debug_threads)
1748 debug_printf ("Checking whether LWP %ld needs to move out of the "
1749 "jump pad.\n",
1750 lwpid_of (current_thread));
1751
1752 r = linux_fast_tracepoint_collecting (lwp, &status);
1753
1754 if (wstat == NULL
1755 || (WSTOPSIG (*wstat) != SIGILL
1756 && WSTOPSIG (*wstat) != SIGFPE
1757 && WSTOPSIG (*wstat) != SIGSEGV
1758 && WSTOPSIG (*wstat) != SIGBUS))
1759 {
1760 lwp->collecting_fast_tracepoint = r;
1761
1762 if (r != 0)
1763 {
1764 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1765 {
1766 /* Haven't executed the original instruction yet.
1767 Set breakpoint there, and wait till it's hit,
1768 then single-step until exiting the jump pad. */
1769 lwp->exit_jump_pad_bkpt
1770 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1771 }
1772
1773 if (debug_threads)
1774 debug_printf ("Checking whether LWP %ld needs to move out of "
1775 "the jump pad...it does\n",
1776 lwpid_of (current_thread));
1777 current_thread = saved_thread;
1778
1779 return 1;
1780 }
1781 }
1782 else
1783 {
1784 /* If we get a synchronous signal while collecting, *and*
1785 while executing the (relocated) original instruction,
1786 reset the PC to point at the tpoint address, before
1787 reporting to GDB. Otherwise, it's an IPA lib bug: just
1788 report the signal to GDB, and pray for the best. */
1789
1790 lwp->collecting_fast_tracepoint = 0;
1791
1792 if (r != 0
1793 && (status.adjusted_insn_addr <= lwp->stop_pc
1794 && lwp->stop_pc < status.adjusted_insn_addr_end))
1795 {
1796 siginfo_t info;
1797 struct regcache *regcache;
1798
1799 /* The si_addr on a few signals references the address
1800 of the faulting instruction. Adjust that as
1801 well. */
1802 if ((WSTOPSIG (*wstat) == SIGILL
1803 || WSTOPSIG (*wstat) == SIGFPE
1804 || WSTOPSIG (*wstat) == SIGBUS
1805 || WSTOPSIG (*wstat) == SIGSEGV)
1806 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1807 (PTRACE_TYPE_ARG3) 0, &info) == 0
1808 /* Final check just to make sure we don't clobber
1809 the siginfo of non-kernel-sent signals. */
1810 && (uintptr_t) info.si_addr == lwp->stop_pc)
1811 {
1812 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1813 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1814 (PTRACE_TYPE_ARG3) 0, &info);
1815 }
1816
1817 regcache = get_thread_regcache (current_thread, 1);
1818 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1819 lwp->stop_pc = status.tpoint_addr;
1820
1821 /* Cancel any fast tracepoint lock this thread was
1822 holding. */
1823 force_unlock_trace_buffer ();
1824 }
1825
1826 if (lwp->exit_jump_pad_bkpt != NULL)
1827 {
1828 if (debug_threads)
1829 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1830 "stopping all threads momentarily.\n");
1831
1832 stop_all_lwps (1, lwp);
1833
1834 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1835 lwp->exit_jump_pad_bkpt = NULL;
1836
1837 unstop_all_lwps (1, lwp);
1838
1839 gdb_assert (lwp->suspended >= 0);
1840 }
1841 }
1842 }
1843
1844 if (debug_threads)
1845 debug_printf ("Checking whether LWP %ld needs to move out of the "
1846 "jump pad...no\n",
1847 lwpid_of (current_thread));
1848
1849 current_thread = saved_thread;
1850 return 0;
1851 }
1852
1853 /* Enqueue one signal in the "signals to report later when out of the
1854 jump pad" list. */
1855
1856 static void
1857 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1858 {
1859 struct pending_signals *p_sig;
1860 struct thread_info *thread = get_lwp_thread (lwp);
1861
1862 if (debug_threads)
1863 debug_printf ("Deferring signal %d for LWP %ld.\n",
1864 WSTOPSIG (*wstat), lwpid_of (thread));
1865
1866 if (debug_threads)
1867 {
1868 struct pending_signals *sig;
1869
1870 for (sig = lwp->pending_signals_to_report;
1871 sig != NULL;
1872 sig = sig->prev)
1873 debug_printf (" Already queued %d\n",
1874 sig->signal);
1875
1876 debug_printf (" (no more currently queued signals)\n");
1877 }
1878
1879 /* Don't enqueue non-RT signals if they are already in the deferred
1880 queue. (SIGSTOP being the easiest signal to see ending up here
1881 twice) */
1882 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1883 {
1884 struct pending_signals *sig;
1885
1886 for (sig = lwp->pending_signals_to_report;
1887 sig != NULL;
1888 sig = sig->prev)
1889 {
1890 if (sig->signal == WSTOPSIG (*wstat))
1891 {
1892 if (debug_threads)
1893 debug_printf ("Not requeuing already queued non-RT signal %d"
1894 " for LWP %ld\n",
1895 sig->signal,
1896 lwpid_of (thread));
1897 return;
1898 }
1899 }
1900 }
1901
1902 p_sig = xmalloc (sizeof (*p_sig));
1903 p_sig->prev = lwp->pending_signals_to_report;
1904 p_sig->signal = WSTOPSIG (*wstat);
1905 memset (&p_sig->info, 0, sizeof (siginfo_t));
1906 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1907 &p_sig->info);
1908
1909 lwp->pending_signals_to_report = p_sig;
1910 }
1911
1912 /* Dequeue one signal from the "signals to report later when out of
1913 the jump pad" list. */
1914
1915 static int
1916 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1917 {
1918 struct thread_info *thread = get_lwp_thread (lwp);
1919
1920 if (lwp->pending_signals_to_report != NULL)
1921 {
1922 struct pending_signals **p_sig;
1923
1924 p_sig = &lwp->pending_signals_to_report;
1925 while ((*p_sig)->prev != NULL)
1926 p_sig = &(*p_sig)->prev;
1927
1928 *wstat = W_STOPCODE ((*p_sig)->signal);
1929 if ((*p_sig)->info.si_signo != 0)
1930 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1931 &(*p_sig)->info);
1932 free (*p_sig);
1933 *p_sig = NULL;
1934
1935 if (debug_threads)
1936 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1937 WSTOPSIG (*wstat), lwpid_of (thread));
1938
1939 if (debug_threads)
1940 {
1941 struct pending_signals *sig;
1942
1943 for (sig = lwp->pending_signals_to_report;
1944 sig != NULL;
1945 sig = sig->prev)
1946 debug_printf (" Still queued %d\n",
1947 sig->signal);
1948
1949 debug_printf (" (no more queued signals)\n");
1950 }
1951
1952 return 1;
1953 }
1954
1955 return 0;
1956 }
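
/* Illustrative note on the pair of helpers above: enqueue pushes new
   entries at the head of the PREV-linked list, while dequeue walks to
   the tail, so deferred signals are re-reported in their original
   arrival order (FIFO).  A sketch with example values:

     enqueue SIGUSR1, then SIGUSR2:
       pending_signals_to_report -> [SIGUSR2] --prev--> [SIGUSR1] --prev--> NULL
     dequeue_one_deferred_signal reports SIGUSR1 first, then SIGUSR2.  */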
1957
1958 /* Fetch the possibly triggered data watchpoint info and store it in
1959 CHILD.
1960
1961 On some archs, like x86, that use debug registers to set
1962 watchpoints, it's possible that the way to know which watched
1963 address trapped, is to check the register that is used to select
1964 which address to watch. Problem is, between setting the watchpoint
1965 and reading back which data address trapped, the user may change
1966 the set of watchpoints, and, as a consequence, GDB changes the
1967 debug registers in the inferior. To avoid reading back a stale
1968    stopped-data-address when that happens, we cache in CHILD the fact
1969 that a watchpoint trapped, and the corresponding data address, as
1970 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1971 registers meanwhile, we have the cached data we can rely on. */
1972
1973 static int
1974 check_stopped_by_watchpoint (struct lwp_info *child)
1975 {
1976 if (the_low_target.stopped_by_watchpoint != NULL)
1977 {
1978 struct thread_info *saved_thread;
1979
1980 saved_thread = current_thread;
1981 current_thread = get_lwp_thread (child);
1982
1983 if (the_low_target.stopped_by_watchpoint ())
1984 {
1985 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
1986
1987 if (the_low_target.stopped_data_address != NULL)
1988 child->stopped_data_address
1989 = the_low_target.stopped_data_address ();
1990 else
1991 child->stopped_data_address = 0;
1992 }
1993
1994 current_thread = saved_thread;
1995 }
1996
1997 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
1998 }
1999
2000 /* Return the ptrace options that we want to try to enable. */
2001
2002 static int
2003 linux_low_ptrace_options (int attached)
2004 {
2005 int options = 0;
2006
2007 if (!attached)
2008 options |= PTRACE_O_EXITKILL;
2009
2010 if (report_fork_events)
2011 options |= PTRACE_O_TRACEFORK;
2012
2013 return options;
2014 }
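
/* A minimal sketch of how the options computed above are presumably
   applied; the real work happens in linux_enable_event_reporting
   (nat/linux-ptrace.c), called from linux_low_filter_event below.
   PID is a ptrace-stopped tracee:

       ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	       (PTRACE_TYPE_ARG4) (uintptr_t) options);

   PTRACE_O_EXITKILL makes the kernel SIGKILL the tracee if gdbserver
   itself dies, which is only wanted for processes we spawned
   ourselves, hence the !attached check.  */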
2015
2016 /* Do low-level handling of the event, and check if we should go on
2017    and pass it to caller code.  Return the affected LWP if we should,
2018    or NULL otherwise.  */
2019
2020 static struct lwp_info *
2021 linux_low_filter_event (int lwpid, int wstat)
2022 {
2023 struct lwp_info *child;
2024 struct thread_info *thread;
2025 int have_stop_pc = 0;
2026
2027 child = find_lwp_pid (pid_to_ptid (lwpid));
2028
2029 /* If we didn't find a process, one of two things presumably happened:
2030 - A process we started and then detached from has exited. Ignore it.
2031 - A process we are controlling has forked and the new child's stop
2032 was reported to us by the kernel. Save its PID. */
2033 if (child == NULL && WIFSTOPPED (wstat))
2034 {
2035 add_to_pid_list (&stopped_pids, lwpid, wstat);
2036 return NULL;
2037 }
2038 else if (child == NULL)
2039 return NULL;
2040
2041 thread = get_lwp_thread (child);
2042
2043 child->stopped = 1;
2044
2045 child->last_status = wstat;
2046
2047 /* Check if the thread has exited. */
2048 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2049 {
2050 if (debug_threads)
2051 debug_printf ("LLFE: %d exited.\n", lwpid);
2052 if (num_lwps (pid_of (thread)) > 1)
2053 {
2054
2055 /* If there is at least one more LWP, then the exit signal was
2056 not the end of the debugged application and should be
2057 ignored. */
2058 delete_lwp (child);
2059 return NULL;
2060 }
2061 else
2062 {
2063 /* This was the last lwp in the process. Since events are
2064 serialized to GDB core, and we can't report this one
2065 right now, but GDB core and the other target layers will
2066 want to be notified about the exit code/signal, leave the
2067 status pending for the next time we're able to report
2068 it. */
2069 mark_lwp_dead (child, wstat);
2070 return child;
2071 }
2072 }
2073
2074 gdb_assert (WIFSTOPPED (wstat));
2075
2076 if (WIFSTOPPED (wstat))
2077 {
2078 struct process_info *proc;
2079
2080 /* Architecture-specific setup after inferior is running. This
2081 needs to happen after we have attached to the inferior and it
2082 is stopped for the first time, but before we access any
2083 inferior registers. */
2084 proc = find_process_pid (pid_of (thread));
2085 if (proc->priv->new_inferior)
2086 {
2087 struct thread_info *saved_thread;
2088
2089 saved_thread = current_thread;
2090 current_thread = thread;
2091
2092 the_low_target.arch_setup ();
2093
2094 current_thread = saved_thread;
2095
2096 proc->priv->new_inferior = 0;
2097 }
2098 }
2099
2100 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2101 {
2102 struct process_info *proc = find_process_pid (pid_of (thread));
2103 int options = linux_low_ptrace_options (proc->attached);
2104
2105 linux_enable_event_reporting (lwpid, options);
2106 child->must_set_ptrace_flags = 0;
2107 }
2108
2109 /* Be careful to not overwrite stop_pc until
2110 check_stopped_by_breakpoint is called. */
2111 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2112 && linux_is_extended_waitstatus (wstat))
2113 {
2114 child->stop_pc = get_pc (child);
2115 if (handle_extended_wait (child, wstat))
2116 {
2117 /* The event has been handled, so just return without
2118 reporting it. */
2119 return NULL;
2120 }
2121 }
2122
2123 /* Check first whether this was a SW/HW breakpoint before checking
2124 watchpoints, because at least s390 can't tell the data address of
2125 hardware watchpoint hits, and returns stopped-by-watchpoint as
2126 long as there's a watchpoint set. */
2127 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2128 {
2129 if (check_stopped_by_breakpoint (child))
2130 have_stop_pc = 1;
2131 }
2132
2133 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2134 or hardware watchpoint. Check which is which if we got
2135 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2136 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2137 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2138 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2139 check_stopped_by_watchpoint (child);
2140
2141 if (!have_stop_pc)
2142 child->stop_pc = get_pc (child);
2143
2144 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2145 && child->stop_expected)
2146 {
2147 if (debug_threads)
2148 debug_printf ("Expected stop.\n");
2149 child->stop_expected = 0;
2150
2151 if (thread->last_resume_kind == resume_stop)
2152 {
2153 /* We want to report the stop to the core. Treat the
2154 SIGSTOP as a normal event. */
2155 if (debug_threads)
2156 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2157 target_pid_to_str (ptid_of (thread)));
2158 }
2159 else if (stopping_threads != NOT_STOPPING_THREADS)
2160 {
2161 /* Stopping threads. We don't want this SIGSTOP to end up
2162 pending. */
2163 if (debug_threads)
2164 debug_printf ("LLW: SIGSTOP caught for %s "
2165 "while stopping threads.\n",
2166 target_pid_to_str (ptid_of (thread)));
2167 return NULL;
2168 }
2169 else
2170 {
2171 /* This is a delayed SIGSTOP. Filter out the event. */
2172 if (debug_threads)
2173 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2174 child->stepping ? "step" : "continue",
2175 target_pid_to_str (ptid_of (thread)));
2176
2177 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2178 return NULL;
2179 }
2180 }
2181
2182 child->status_pending_p = 1;
2183 child->status_pending = wstat;
2184 return child;
2185 }
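
/* To summarize the contract above: a NULL return means the event was
   consumed here (unknown LWP saved to stopped_pids, a non-final LWP
   exit, a handled extended event, or a filtered SIGSTOP); a non-NULL
   return hands CHILD back with its status pending (or marked dead)
   for the caller to report.  */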
2186
2187 /* Resume LWPs that are currently stopped without any pending status
2188 to report, but are resumed from the core's perspective. */
2189
2190 static void
2191 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2192 {
2193 struct thread_info *thread = (struct thread_info *) entry;
2194 struct lwp_info *lp = get_thread_lwp (thread);
2195
2196 if (lp->stopped
2197 && !lp->status_pending_p
2198 && thread->last_resume_kind != resume_stop
2199 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2200 {
2201 int step = thread->last_resume_kind == resume_step;
2202
2203 if (debug_threads)
2204 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2205 target_pid_to_str (ptid_of (thread)),
2206 paddress (lp->stop_pc),
2207 step);
2208
2209 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2210 }
2211 }
2212
2213 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2214 match FILTER_PTID (leaving others pending). The PTIDs can be:
2215 minus_one_ptid, to specify any child; a pid PTID, specifying all
2216 lwps of a thread group; or a PTID representing a single lwp. Store
2217 the stop status through the status pointer WSTAT. OPTIONS is
2218 passed to the waitpid call. Return 0 if no event was found and
2219 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2220    were found.  Return the PID of the stopped child otherwise.  */
2221
2222 static int
2223 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2224 int *wstatp, int options)
2225 {
2226 struct thread_info *event_thread;
2227 struct lwp_info *event_child, *requested_child;
2228 sigset_t block_mask, prev_mask;
2229
2230 retry:
2231 /* N.B. event_thread points to the thread_info struct that contains
2232 event_child. Keep them in sync. */
2233 event_thread = NULL;
2234 event_child = NULL;
2235 requested_child = NULL;
2236
2237 /* Check for a lwp with a pending status. */
2238
2239 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2240 {
2241 event_thread = (struct thread_info *)
2242 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2243 if (event_thread != NULL)
2244 event_child = get_thread_lwp (event_thread);
2245 if (debug_threads && event_thread)
2246 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2247 }
2248 else if (!ptid_equal (filter_ptid, null_ptid))
2249 {
2250 requested_child = find_lwp_pid (filter_ptid);
2251
2252 if (stopping_threads == NOT_STOPPING_THREADS
2253 && requested_child->status_pending_p
2254 && requested_child->collecting_fast_tracepoint)
2255 {
2256 enqueue_one_deferred_signal (requested_child,
2257 &requested_child->status_pending);
2258 requested_child->status_pending_p = 0;
2259 requested_child->status_pending = 0;
2260 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2261 }
2262
2263 if (requested_child->suspended
2264 && requested_child->status_pending_p)
2265 {
2266 internal_error (__FILE__, __LINE__,
2267 "requesting an event out of a"
2268 " suspended child?");
2269 }
2270
2271 if (requested_child->status_pending_p)
2272 {
2273 event_child = requested_child;
2274 event_thread = get_lwp_thread (event_child);
2275 }
2276 }
2277
2278 if (event_child != NULL)
2279 {
2280 if (debug_threads)
2281 debug_printf ("Got an event from pending child %ld (%04x)\n",
2282 lwpid_of (event_thread), event_child->status_pending);
2283 *wstatp = event_child->status_pending;
2284 event_child->status_pending_p = 0;
2285 event_child->status_pending = 0;
2286 current_thread = event_thread;
2287 return lwpid_of (event_thread);
2288 }
2289
2290 /* But if we don't find a pending event, we'll have to wait.
2291
2292 We only enter this loop if no process has a pending wait status.
2293 Thus any action taken in response to a wait status inside this
2294 loop is responding as soon as we detect the status, not after any
2295 pending events. */
2296
2297 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2298 all signals while here. */
2299 sigfillset (&block_mask);
2300 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2301
2302 /* Always pull all events out of the kernel. We'll randomly select
2303 an event LWP out of all that have events, to prevent
2304 starvation. */
2305 while (event_child == NULL)
2306 {
2307 pid_t ret = 0;
2308
2309       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2310 quirks:
2311
2312 - If the thread group leader exits while other threads in the
2313 thread group still exist, waitpid(TGID, ...) hangs. That
2314 waitpid won't return an exit status until the other threads
2315 in the group are reaped.
2316
2317 - When a non-leader thread execs, that thread just vanishes
2318 without reporting an exit (so we'd hang if we waited for it
2319 explicitly in that case). The exec event is reported to
2320 the TGID pid (although we don't currently enable exec
2321 events). */
2322 errno = 0;
2323 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2324
2325 if (debug_threads)
2326 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2327 ret, errno ? strerror (errno) : "ERRNO-OK");
2328
2329 if (ret > 0)
2330 {
2331 if (debug_threads)
2332 {
2333 debug_printf ("LLW: waitpid %ld received %s\n",
2334 (long) ret, status_to_str (*wstatp));
2335 }
2336
2337 /* Filter all events. IOW, leave all events pending. We'll
2338 randomly select an event LWP out of all that have events
2339 below. */
2340 linux_low_filter_event (ret, *wstatp);
2341 /* Retry until nothing comes out of waitpid. A single
2342 SIGCHLD can indicate more than one child stopped. */
2343 continue;
2344 }
2345
2346 /* Now that we've pulled all events out of the kernel, resume
2347 LWPs that don't have an interesting event to report. */
2348 if (stopping_threads == NOT_STOPPING_THREADS)
2349 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2350
2351 /* ... and find an LWP with a status to report to the core, if
2352 any. */
2353 event_thread = (struct thread_info *)
2354 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2355 if (event_thread != NULL)
2356 {
2357 event_child = get_thread_lwp (event_thread);
2358 *wstatp = event_child->status_pending;
2359 event_child->status_pending_p = 0;
2360 event_child->status_pending = 0;
2361 break;
2362 }
2363
2364 /* Check for zombie thread group leaders. Those can't be reaped
2365 until all other threads in the thread group are. */
2366 check_zombie_leaders ();
2367
2368 /* If there are no resumed children left in the set of LWPs we
2369 want to wait for, bail. We can't just block in
2370 waitpid/sigsuspend, because lwps might have been left stopped
2371 in trace-stop state, and we'd be stuck forever waiting for
2372 their status to change (which would only happen if we resumed
2373 them). Even if WNOHANG is set, this return code is preferred
2374 over 0 (below), as it is more detailed. */
2375 if ((find_inferior (&all_threads,
2376 not_stopped_callback,
2377 &wait_ptid) == NULL))
2378 {
2379 if (debug_threads)
2380 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2381 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2382 return -1;
2383 }
2384
2385 /* No interesting event to report to the caller. */
2386 if ((options & WNOHANG))
2387 {
2388 if (debug_threads)
2389 debug_printf ("WNOHANG set, no event found\n");
2390
2391 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2392 return 0;
2393 }
2394
2395 /* Block until we get an event reported with SIGCHLD. */
2396 if (debug_threads)
2397 debug_printf ("sigsuspend'ing\n");
2398
2399 sigsuspend (&prev_mask);
2400 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2401 goto retry;
2402 }
2403
2404 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2405
2406 current_thread = event_thread;
2407
2408 /* Check for thread exit. */
2409 if (! WIFSTOPPED (*wstatp))
2410 {
2411 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2412
2413 if (debug_threads)
2414 debug_printf ("LWP %d is the last lwp of process. "
2415 "Process %ld exiting.\n",
2416 pid_of (event_thread), lwpid_of (event_thread));
2417 return lwpid_of (event_thread);
2418 }
2419
2420 return lwpid_of (event_thread);
2421 }
2422
2423 /* Wait for an event from child(ren) PTID. PTIDs can be:
2424 minus_one_ptid, to specify any child; a pid PTID, specifying all
2425 lwps of a thread group; or a PTID representing a single lwp. Store
2426 the stop status through the status pointer WSTAT. OPTIONS is
2427 passed to the waitpid call. Return 0 if no event was found and
2428 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2429    were found.  Return the PID of the stopped child otherwise.  */
2430
2431 static int
2432 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2433 {
2434 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2435 }
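
/* Note that passing PTID for both roles means "wait on these
   children and report an event from any of them".  The other useful
   combination appears in wait_for_sigstop below, which passes
   null_ptid as the filter so that every event is pulled out of the
   kernel but left pending.  */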
2436
2437 /* Count the LWPs that have had events.  */
2438
2439 static int
2440 count_events_callback (struct inferior_list_entry *entry, void *data)
2441 {
2442 struct thread_info *thread = (struct thread_info *) entry;
2443 struct lwp_info *lp = get_thread_lwp (thread);
2444 int *count = data;
2445
2446 gdb_assert (count != NULL);
2447
2448 /* Count only resumed LWPs that have an event pending. */
2449 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2450 && lp->status_pending_p)
2451 (*count)++;
2452
2453 return 0;
2454 }
2455
2456 /* Select the LWP (if any) that is currently being single-stepped. */
2457
2458 static int
2459 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2460 {
2461 struct thread_info *thread = (struct thread_info *) entry;
2462 struct lwp_info *lp = get_thread_lwp (thread);
2463
2464 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2465 && thread->last_resume_kind == resume_step
2466 && lp->status_pending_p)
2467 return 1;
2468 else
2469 return 0;
2470 }
2471
2472 /* Select the Nth LWP that has had an event. */
2473
2474 static int
2475 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2476 {
2477 struct thread_info *thread = (struct thread_info *) entry;
2478 struct lwp_info *lp = get_thread_lwp (thread);
2479 int *selector = data;
2480
2481 gdb_assert (selector != NULL);
2482
2483 /* Select only resumed LWPs that have an event pending. */
2484 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2485 && lp->status_pending_p)
2486 if ((*selector)-- == 0)
2487 return 1;
2488
2489 return 0;
2490 }
2491
2492 /* Select one LWP out of those that have events pending. */
2493
2494 static void
2495 select_event_lwp (struct lwp_info **orig_lp)
2496 {
2497 int num_events = 0;
2498 int random_selector;
2499 struct thread_info *event_thread = NULL;
2500
2501 /* In all-stop, give preference to the LWP that is being
2502 single-stepped. There will be at most one, and it's the LWP that
2503 the core is most interested in. If we didn't do this, then we'd
2504 have to handle pending step SIGTRAPs somehow in case the core
2505 later continues the previously-stepped thread, otherwise we'd
2506 report the pending SIGTRAP, and the core, not having stepped the
2507 thread, wouldn't understand what the trap was for, and therefore
2508 would report it to the user as a random signal. */
2509 if (!non_stop)
2510 {
2511 event_thread
2512 = (struct thread_info *) find_inferior (&all_threads,
2513 select_singlestep_lwp_callback,
2514 NULL);
2515 if (event_thread != NULL)
2516 {
2517 if (debug_threads)
2518 debug_printf ("SEL: Select single-step %s\n",
2519 target_pid_to_str (ptid_of (event_thread)));
2520 }
2521 }
2522 if (event_thread == NULL)
2523 {
2524 /* No single-stepping LWP. Select one at random, out of those
2525 which have had events. */
2526
2527 /* First see how many events we have. */
2528 find_inferior (&all_threads, count_events_callback, &num_events);
2529 gdb_assert (num_events > 0);
2530
2531 /* Now randomly pick a LWP out of those that have had
2532 events. */
2533 random_selector = (int)
2534 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2535
2536 if (debug_threads && num_events > 1)
2537 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2538 num_events, random_selector);
2539
2540 event_thread
2541 = (struct thread_info *) find_inferior (&all_threads,
2542 select_event_lwp_callback,
2543 &random_selector);
2544 }
2545
2546 if (event_thread != NULL)
2547 {
2548 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2549
2550 /* Switch the event LWP. */
2551 *orig_lp = event_lp;
2552 }
2553 }
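
/* Note on the random pick above: (num_events * (double) rand ())
   / (RAND_MAX + 1.0) maps rand ()'s range [0, RAND_MAX] uniformly
   onto [0, num_events), so after truncation to int each pending
   event is equally likely.  For example, with num_events == 3 and
   RAND_MAX == 2147483647, rand () values 0..715827882 yield
   selector 0.  */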
2554
2555 /* Decrement the suspend count of an LWP. */
2556
2557 static int
2558 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2559 {
2560 struct thread_info *thread = (struct thread_info *) entry;
2561 struct lwp_info *lwp = get_thread_lwp (thread);
2562
2563 /* Ignore EXCEPT. */
2564 if (lwp == except)
2565 return 0;
2566
2567 lwp->suspended--;
2568
2569 gdb_assert (lwp->suspended >= 0);
2570 return 0;
2571 }
2572
2573 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2574    non-NULL.  */
2575
2576 static void
2577 unsuspend_all_lwps (struct lwp_info *except)
2578 {
2579 find_inferior (&all_threads, unsuspend_one_lwp, except);
2580 }
2581
2582 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2583 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2584 void *data);
2585 static int lwp_running (struct inferior_list_entry *entry, void *data);
2586 static ptid_t linux_wait_1 (ptid_t ptid,
2587 struct target_waitstatus *ourstatus,
2588 int target_options);
2589
2590 /* Stabilize threads (move out of jump pads).
2591
2592 If a thread is midway collecting a fast tracepoint, we need to
2593 finish the collection and move it out of the jump pad before
2594 reporting the signal.
2595
2596 This avoids recursion while collecting (when a signal arrives
2597 midway, and the signal handler itself collects), which would trash
2598 the trace buffer. In case the user set a breakpoint in a signal
2599    handler, this avoids the backtrace showing the jump pad, etc.
2600 Most importantly, there are certain things we can't do safely if
2601    threads are stopped in a jump pad (or in its callees).  For
2602 example:
2603
2604 - starting a new trace run. A thread still collecting the
2605    previous run could trash the trace buffer when resumed.  The trace
2606    buffer control structures would have been reset but the thread had
2607    no way to tell.  The thread could even be midway through memcpy'ing
2608    to the buffer, which would mean that when resumed, it would clobber
2609    the trace buffer that had been set for a new run.
2610
2611 - we can't rewrite/reuse the jump pads for new tracepoints
2612    safely.  Say you do tstart while a thread is stopped midway through
2613 collecting. When the thread is later resumed, it finishes the
2614 collection, and returns to the jump pad, to execute the original
2615 instruction that was under the tracepoint jump at the time the
2616 older run had been started. If the jump pad had been rewritten
2617 since for something else in the new run, the thread would now
2618 execute the wrong / random instructions. */
2619
2620 static void
2621 linux_stabilize_threads (void)
2622 {
2623 struct thread_info *saved_thread;
2624 struct thread_info *thread_stuck;
2625
2626 thread_stuck
2627 = (struct thread_info *) find_inferior (&all_threads,
2628 stuck_in_jump_pad_callback,
2629 NULL);
2630 if (thread_stuck != NULL)
2631 {
2632 if (debug_threads)
2633 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2634 lwpid_of (thread_stuck));
2635 return;
2636 }
2637
2638 saved_thread = current_thread;
2639
2640 stabilizing_threads = 1;
2641
2642 /* Kick 'em all. */
2643 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2644
2645 /* Loop until all are stopped out of the jump pads. */
2646 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2647 {
2648 struct target_waitstatus ourstatus;
2649 struct lwp_info *lwp;
2650 int wstat;
2651
2652       /* Note that we go through the full wait event loop.  While
2653 	 moving threads out of the jump pad, we need to be able to step
2654 over internal breakpoints and such. */
2655 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2656
2657 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2658 {
2659 lwp = get_thread_lwp (current_thread);
2660
2661 /* Lock it. */
2662 lwp->suspended++;
2663
2664 if (ourstatus.value.sig != GDB_SIGNAL_0
2665 || current_thread->last_resume_kind == resume_stop)
2666 {
2667 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2668 enqueue_one_deferred_signal (lwp, &wstat);
2669 }
2670 }
2671 }
2672
2673 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2674
2675 stabilizing_threads = 0;
2676
2677 current_thread = saved_thread;
2678
2679 if (debug_threads)
2680 {
2681 thread_stuck
2682 = (struct thread_info *) find_inferior (&all_threads,
2683 stuck_in_jump_pad_callback,
2684 NULL);
2685 if (thread_stuck != NULL)
2686 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2687 lwpid_of (thread_stuck));
2688 }
2689 }
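
/* Invariant of the loop above: each stop reported while stabilizing
   bumps the LWP's suspend count and defers its signal, so once
   lwp_running finds nothing left running, every LWP is parked
   outside the jump pads with its signals queued; the final
   unsuspend_one_lwp pass then restores the suspend counts.  */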
2690
2691 static void async_file_mark (void);
2692
2693 /* Convenience function that is called when the kernel reports an
2694 event that is not passed out to GDB. */
2695
2696 static ptid_t
2697 ignore_event (struct target_waitstatus *ourstatus)
2698 {
2699 /* If we got an event, there may still be others, as a single
2700 SIGCHLD can indicate more than one child stopped. This forces
2701 another target_wait call. */
2702 async_file_mark ();
2703
2704 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2705 return null_ptid;
2706 }
2707
2708 /* Return non-zero if WAITSTATUS reflects an extended Linux
2709    event.  Otherwise, return zero.  */
2710
2711 static int
2712 extended_event_reported (const struct target_waitstatus *waitstatus)
2713 {
2714 if (waitstatus == NULL)
2715 return 0;
2716
2717 return (waitstatus->kind == TARGET_WAITKIND_FORKED);
2718 }
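
/* Only fork events are recognized so far, matching the lone
   PTRACE_O_TRACEFORK option requested in linux_low_ptrace_options;
   reporting vfork or exec would presumably extend this check with
   TARGET_WAITKIND_VFORKED / TARGET_WAITKIND_EXECD alongside the
   corresponding ptrace options.  */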
2719
2720 /* Wait for a process event, and return its status.  */
2721
2722 static ptid_t
2723 linux_wait_1 (ptid_t ptid,
2724 struct target_waitstatus *ourstatus, int target_options)
2725 {
2726 int w;
2727 struct lwp_info *event_child;
2728 int options;
2729 int pid;
2730 int step_over_finished;
2731 int bp_explains_trap;
2732 int maybe_internal_trap;
2733 int report_to_gdb;
2734 int trace_event;
2735 int in_step_range;
2736
2737 if (debug_threads)
2738 {
2739 debug_enter ();
2740 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2741 }
2742
2743 /* Translate generic target options into linux options. */
2744 options = __WALL;
2745 if (target_options & TARGET_WNOHANG)
2746 options |= WNOHANG;
2747
2748 bp_explains_trap = 0;
2749 trace_event = 0;
2750 in_step_range = 0;
2751 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2752
2753 if (ptid_equal (step_over_bkpt, null_ptid))
2754 pid = linux_wait_for_event (ptid, &w, options);
2755 else
2756 {
2757 if (debug_threads)
2758 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2759 target_pid_to_str (step_over_bkpt));
2760 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2761 }
2762
2763 if (pid == 0)
2764 {
2765 gdb_assert (target_options & TARGET_WNOHANG);
2766
2767 if (debug_threads)
2768 {
2769 debug_printf ("linux_wait_1 ret = null_ptid, "
2770 "TARGET_WAITKIND_IGNORE\n");
2771 debug_exit ();
2772 }
2773
2774 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2775 return null_ptid;
2776 }
2777 else if (pid == -1)
2778 {
2779 if (debug_threads)
2780 {
2781 debug_printf ("linux_wait_1 ret = null_ptid, "
2782 "TARGET_WAITKIND_NO_RESUMED\n");
2783 debug_exit ();
2784 }
2785
2786 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2787 return null_ptid;
2788 }
2789
2790 event_child = get_thread_lwp (current_thread);
2791
2792 /* linux_wait_for_event only returns an exit status for the last
2793 child of a process. Report it. */
2794 if (WIFEXITED (w) || WIFSIGNALED (w))
2795 {
2796 if (WIFEXITED (w))
2797 {
2798 ourstatus->kind = TARGET_WAITKIND_EXITED;
2799 ourstatus->value.integer = WEXITSTATUS (w);
2800
2801 if (debug_threads)
2802 {
2803 debug_printf ("linux_wait_1 ret = %s, exited with "
2804 "retcode %d\n",
2805 target_pid_to_str (ptid_of (current_thread)),
2806 WEXITSTATUS (w));
2807 debug_exit ();
2808 }
2809 }
2810 else
2811 {
2812 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2813 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2814
2815 if (debug_threads)
2816 {
2817 debug_printf ("linux_wait_1 ret = %s, terminated with "
2818 "signal %d\n",
2819 target_pid_to_str (ptid_of (current_thread)),
2820 WTERMSIG (w));
2821 debug_exit ();
2822 }
2823 }
2824
2825 return ptid_of (current_thread);
2826 }
2827
2828 /* If step-over executes a breakpoint instruction, it means a
2829 gdb/gdbserver breakpoint had been planted on top of a permanent
2830 breakpoint. The PC has been adjusted by
2831 check_stopped_by_breakpoint to point at the breakpoint address.
2832 Advance the PC manually past the breakpoint, otherwise the
2833 program would keep trapping the permanent breakpoint forever. */
2834 if (!ptid_equal (step_over_bkpt, null_ptid)
2835 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2836 {
2837 unsigned int increment_pc = the_low_target.breakpoint_len;
2838
2839 if (debug_threads)
2840 {
2841 debug_printf ("step-over for %s executed software breakpoint\n",
2842 target_pid_to_str (ptid_of (current_thread)));
2843 }
2844
2845 if (increment_pc != 0)
2846 {
2847 struct regcache *regcache
2848 = get_thread_regcache (current_thread, 1);
2849
2850 event_child->stop_pc += increment_pc;
2851 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2852
2853 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2854 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2855 }
2856 }
2857
2858 /* If this event was not handled before, and is not a SIGTRAP, we
2859 report it. SIGILL and SIGSEGV are also treated as traps in case
2860 a breakpoint is inserted at the current PC. If this target does
2861 not support internal breakpoints at all, we also report the
2862 SIGTRAP without further processing; it's of no concern to us. */
2863 maybe_internal_trap
2864 = (supports_breakpoints ()
2865 && (WSTOPSIG (w) == SIGTRAP
2866 || ((WSTOPSIG (w) == SIGILL
2867 || WSTOPSIG (w) == SIGSEGV)
2868 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2869
2870 if (maybe_internal_trap)
2871 {
2872 /* Handle anything that requires bookkeeping before deciding to
2873 report the event or continue waiting. */
2874
2875 /* First check if we can explain the SIGTRAP with an internal
2876 breakpoint, or if we should possibly report the event to GDB.
2877 Do this before anything that may remove or insert a
2878 breakpoint. */
2879 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2880
2881 /* We have a SIGTRAP, possibly a step-over dance has just
2882 finished. If so, tweak the state machine accordingly,
2883 reinsert breakpoints and delete any reinsert (software
2884 single-step) breakpoints. */
2885 step_over_finished = finish_step_over (event_child);
2886
2887 /* Now invoke the callbacks of any internal breakpoints there. */
2888 check_breakpoints (event_child->stop_pc);
2889
2890 /* Handle tracepoint data collecting. This may overflow the
2891 trace buffer, and cause a tracing stop, removing
2892 breakpoints. */
2893 trace_event = handle_tracepoints (event_child);
2894
2895 if (bp_explains_trap)
2896 {
2897 /* If we stepped or ran into an internal breakpoint, we've
2898 already handled it. So next time we resume (from this
2899 PC), we should step over it. */
2900 if (debug_threads)
2901 debug_printf ("Hit a gdbserver breakpoint.\n");
2902
2903 if (breakpoint_here (event_child->stop_pc))
2904 event_child->need_step_over = 1;
2905 }
2906 }
2907 else
2908 {
2909 /* We have some other signal, possibly a step-over dance was in
2910 progress, and it should be cancelled too. */
2911 step_over_finished = finish_step_over (event_child);
2912 }
2913
2914 /* We have all the data we need. Either report the event to GDB, or
2915 resume threads and keep waiting for more. */
2916
2917 /* If we're collecting a fast tracepoint, finish the collection and
2918 move out of the jump pad before delivering a signal. See
2919 linux_stabilize_threads. */
2920
2921 if (WIFSTOPPED (w)
2922 && WSTOPSIG (w) != SIGTRAP
2923 && supports_fast_tracepoints ()
2924 && agent_loaded_p ())
2925 {
2926 if (debug_threads)
2927 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2928 "to defer or adjust it.\n",
2929 WSTOPSIG (w), lwpid_of (current_thread));
2930
2931 /* Allow debugging the jump pad itself. */
2932 if (current_thread->last_resume_kind != resume_step
2933 && maybe_move_out_of_jump_pad (event_child, &w))
2934 {
2935 enqueue_one_deferred_signal (event_child, &w);
2936
2937 if (debug_threads)
2938 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2939 WSTOPSIG (w), lwpid_of (current_thread));
2940
2941 linux_resume_one_lwp (event_child, 0, 0, NULL);
2942
2943 return ignore_event (ourstatus);
2944 }
2945 }
2946
2947 if (event_child->collecting_fast_tracepoint)
2948 {
2949 if (debug_threads)
2950 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2951 "Check if we're already there.\n",
2952 lwpid_of (current_thread),
2953 event_child->collecting_fast_tracepoint);
2954
2955 trace_event = 1;
2956
2957 event_child->collecting_fast_tracepoint
2958 = linux_fast_tracepoint_collecting (event_child, NULL);
2959
2960 if (event_child->collecting_fast_tracepoint != 1)
2961 {
2962 /* No longer need this breakpoint. */
2963 if (event_child->exit_jump_pad_bkpt != NULL)
2964 {
2965 if (debug_threads)
2966 		debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
2967 			      "Stopping all threads momentarily.\n");
2968
2969 /* Other running threads could hit this breakpoint.
2970 We don't handle moribund locations like GDB does,
2971 instead we always pause all threads when removing
2972 breakpoints, so that any step-over or
2973 decr_pc_after_break adjustment is always taken
2974 care of while the breakpoint is still
2975 inserted. */
2976 stop_all_lwps (1, event_child);
2977
2978 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2979 event_child->exit_jump_pad_bkpt = NULL;
2980
2981 unstop_all_lwps (1, event_child);
2982
2983 gdb_assert (event_child->suspended >= 0);
2984 }
2985 }
2986
2987 if (event_child->collecting_fast_tracepoint == 0)
2988 {
2989 if (debug_threads)
2990 debug_printf ("fast tracepoint finished "
2991 "collecting successfully.\n");
2992
2993 /* We may have a deferred signal to report. */
2994 if (dequeue_one_deferred_signal (event_child, &w))
2995 {
2996 if (debug_threads)
2997 debug_printf ("dequeued one signal.\n");
2998 }
2999 else
3000 {
3001 if (debug_threads)
3002 debug_printf ("no deferred signals.\n");
3003
3004 if (stabilizing_threads)
3005 {
3006 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3007 ourstatus->value.sig = GDB_SIGNAL_0;
3008
3009 if (debug_threads)
3010 {
3011 debug_printf ("linux_wait_1 ret = %s, stopped "
3012 "while stabilizing threads\n",
3013 target_pid_to_str (ptid_of (current_thread)));
3014 debug_exit ();
3015 }
3016
3017 return ptid_of (current_thread);
3018 }
3019 }
3020 }
3021 }
3022
3023 /* Check whether GDB would be interested in this event. */
3024
3025 /* If GDB is not interested in this signal, don't stop other
3026 threads, and don't report it to GDB. Just resume the inferior
3027 right away. We do this for threading-related signals as well as
3028 any that GDB specifically requested we ignore. But never ignore
3029 SIGSTOP if we sent it ourselves, and do not ignore signals when
3030 stepping - they may require special handling to skip the signal
3031 handler. Also never ignore signals that could be caused by a
3032 breakpoint. */
3033 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3034 thread library? */
3035 if (WIFSTOPPED (w)
3036 && current_thread->last_resume_kind != resume_step
3037 && (
3038 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3039 (current_process ()->priv->thread_db != NULL
3040 && (WSTOPSIG (w) == __SIGRTMIN
3041 || WSTOPSIG (w) == __SIGRTMIN + 1))
3042 ||
3043 #endif
3044 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3045 && !(WSTOPSIG (w) == SIGSTOP
3046 && current_thread->last_resume_kind == resume_stop)
3047 && !linux_wstatus_maybe_breakpoint (w))))
3048 {
3049 siginfo_t info, *info_p;
3050
3051 if (debug_threads)
3052 debug_printf ("Ignored signal %d for LWP %ld.\n",
3053 WSTOPSIG (w), lwpid_of (current_thread));
3054
3055 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3056 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3057 info_p = &info;
3058 else
3059 info_p = NULL;
3060 linux_resume_one_lwp (event_child, event_child->stepping,
3061 WSTOPSIG (w), info_p);
3062 return ignore_event (ourstatus);
3063 }
3064
3065 /* Note that all addresses are always "out of the step range" when
3066 there's no range to begin with. */
3067 in_step_range = lwp_in_step_range (event_child);
3068
3069 /* If GDB wanted this thread to single step, and the thread is out
3070 of the step range, we always want to report the SIGTRAP, and let
3071 GDB handle it. Watchpoints should always be reported. So should
3072 signals we can't explain. A SIGTRAP we can't explain could be a
3073      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3074      we do, we'll be able to handle GDB breakpoints on top of internal
3075      breakpoints, by handling the internal breakpoint and still
3076      reporting the event to GDB.  If we don't, we're out of luck; GDB
3077 won't see the breakpoint hit. */
3078 report_to_gdb = (!maybe_internal_trap
3079 || (current_thread->last_resume_kind == resume_step
3080 && !in_step_range)
3081 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3082 || (!step_over_finished && !in_step_range
3083 && !bp_explains_trap && !trace_event)
3084 || (gdb_breakpoint_here (event_child->stop_pc)
3085 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3086 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3087 || extended_event_reported (&event_child->waitstatus));
3088
3089 run_breakpoint_commands (event_child->stop_pc);
3090
3091 /* We found no reason GDB would want us to stop. We either hit one
3092 of our own breakpoints, or finished an internal step GDB
3093 shouldn't know about. */
3094 if (!report_to_gdb)
3095 {
3096 if (debug_threads)
3097 {
3098 if (bp_explains_trap)
3099 debug_printf ("Hit a gdbserver breakpoint.\n");
3100 if (step_over_finished)
3101 debug_printf ("Step-over finished.\n");
3102 if (trace_event)
3103 debug_printf ("Tracepoint event.\n");
3104 if (lwp_in_step_range (event_child))
3105 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3106 paddress (event_child->stop_pc),
3107 paddress (event_child->step_range_start),
3108 paddress (event_child->step_range_end));
3109 if (extended_event_reported (&event_child->waitstatus))
3110 {
3111 char *str = target_waitstatus_to_string (ourstatus);
3112 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3113 lwpid_of (get_lwp_thread (event_child)), str);
3114 xfree (str);
3115 }
3116 }
3117
3118 /* We're not reporting this breakpoint to GDB, so apply the
3119 decr_pc_after_break adjustment to the inferior's regcache
3120 ourselves. */
3121
3122 if (the_low_target.set_pc != NULL)
3123 {
3124 struct regcache *regcache
3125 = get_thread_regcache (current_thread, 1);
3126 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3127 }
3128
3129 /* We may have finished stepping over a breakpoint. If so,
3130 we've stopped and suspended all LWPs momentarily except the
3131 stepping one. This is where we resume them all again. We're
3132 going to keep waiting, so use proceed, which handles stepping
3133 over the next breakpoint. */
3134 if (debug_threads)
3135 debug_printf ("proceeding all threads.\n");
3136
3137 if (step_over_finished)
3138 unsuspend_all_lwps (event_child);
3139
3140 proceed_all_lwps ();
3141 return ignore_event (ourstatus);
3142 }
3143
3144 if (debug_threads)
3145 {
3146 if (current_thread->last_resume_kind == resume_step)
3147 {
3148 if (event_child->step_range_start == event_child->step_range_end)
3149 debug_printf ("GDB wanted to single-step, reporting event.\n");
3150 else if (!lwp_in_step_range (event_child))
3151 debug_printf ("Out of step range, reporting event.\n");
3152 }
3153 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3154 debug_printf ("Stopped by watchpoint.\n");
3155 else if (gdb_breakpoint_here (event_child->stop_pc))
3156 debug_printf ("Stopped by GDB breakpoint.\n");
3157 if (debug_threads)
3158 debug_printf ("Hit a non-gdbserver trap event.\n");
3159 }
3160
3161 /* Alright, we're going to report a stop. */
3162
3163 if (!stabilizing_threads)
3164 {
3165 /* In all-stop, stop all threads. */
3166 if (!non_stop)
3167 stop_all_lwps (0, NULL);
3168
3169 /* If we're not waiting for a specific LWP, choose an event LWP
3170 from among those that have had events. Giving equal priority
3171 to all LWPs that have had events helps prevent
3172 starvation. */
3173 if (ptid_equal (ptid, minus_one_ptid))
3174 {
3175 event_child->status_pending_p = 1;
3176 event_child->status_pending = w;
3177
3178 select_event_lwp (&event_child);
3179
3180 /* current_thread and event_child must stay in sync. */
3181 current_thread = get_lwp_thread (event_child);
3182
3183 event_child->status_pending_p = 0;
3184 w = event_child->status_pending;
3185 }
3186
3187 if (step_over_finished)
3188 {
3189 if (!non_stop)
3190 {
3191 /* If we were doing a step-over, all other threads but
3192 the stepping one had been paused in start_step_over,
3193 with their suspend counts incremented. We don't want
3194 to do a full unstop/unpause, because we're in
3195 all-stop mode (so we want threads stopped), but we
3196 still need to unsuspend the other threads, to
3197 decrement their `suspended' count back. */
3198 unsuspend_all_lwps (event_child);
3199 }
3200 else
3201 {
3202 /* If we just finished a step-over, then all threads had
3203 been momentarily paused. In all-stop, that's fine,
3204 we want threads stopped by now anyway. In non-stop,
3205 we need to re-resume threads that GDB wanted to be
3206 running. */
3207 unstop_all_lwps (1, event_child);
3208 }
3209 }
3210
3211 /* Stabilize threads (move out of jump pads). */
3212 if (!non_stop)
3213 stabilize_threads ();
3214 }
3215 else
3216 {
3217 /* If we just finished a step-over, then all threads had been
3218 momentarily paused. In all-stop, that's fine, we want
3219 threads stopped by now anyway. In non-stop, we need to
3220 re-resume threads that GDB wanted to be running. */
3221 if (step_over_finished)
3222 unstop_all_lwps (1, event_child);
3223 }
3224
3225 if (extended_event_reported (&event_child->waitstatus))
3226 {
3227 /* If the reported event is a fork, vfork or exec, let GDB know. */
3228 ourstatus->kind = event_child->waitstatus.kind;
3229 ourstatus->value = event_child->waitstatus.value;
3230
3231 /* Clear the event lwp's waitstatus since we handled it already. */
3232 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3233 }
3234 else
3235 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3236
3237 /* Now that we've selected our final event LWP, un-adjust its PC if
3238 it was a software breakpoint, and the client doesn't know we can
3239 adjust the breakpoint ourselves. */
3240 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3241 && !swbreak_feature)
3242 {
3243 int decr_pc = the_low_target.decr_pc_after_break;
3244
3245 if (decr_pc != 0)
3246 {
3247 struct regcache *regcache
3248 = get_thread_regcache (current_thread, 1);
3249 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3250 }
3251 }
3252
3253 if (current_thread->last_resume_kind == resume_stop
3254 && WSTOPSIG (w) == SIGSTOP)
3255 {
3256 /* A thread that has been requested to stop by GDB with vCont;t,
3257 and it stopped cleanly, so report as SIG0. The use of
3258 SIGSTOP is an implementation detail. */
3259 ourstatus->value.sig = GDB_SIGNAL_0;
3260 }
3261 else if (current_thread->last_resume_kind == resume_stop
3262 && WSTOPSIG (w) != SIGSTOP)
3263 {
3264 /* A thread that has been requested to stop by GDB with vCont;t,
3265 	 but it stopped for other reasons.  */
3266 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3267 }
3268 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3269 {
3270 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3271 }
3272
3273 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3274
3275 if (debug_threads)
3276 {
3277 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3278 target_pid_to_str (ptid_of (current_thread)),
3279 ourstatus->kind, ourstatus->value.sig);
3280 debug_exit ();
3281 }
3282
3283 return ptid_of (current_thread);
3284 }
3285
3286 /* Get rid of any pending event in the pipe. */
3287 static void
3288 async_file_flush (void)
3289 {
3290 int ret;
3291 char buf;
3292
3293 do
3294 ret = read (linux_event_pipe[0], &buf, 1);
3295 while (ret >= 0 || (ret == -1 && errno == EINTR));
3296 }
3297
3298 /* Put something in the pipe, so the event loop wakes up. */
3299 static void
3300 async_file_mark (void)
3301 {
3302 int ret;
3303
3304 async_file_flush ();
3305
3306 do
3307 ret = write (linux_event_pipe[1], "+", 1);
3308 while (ret == 0 || (ret == -1 && errno == EINTR));
3309
3310 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3311 be awakened anyway. */
3312 }
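
/* The two helpers above implement the classic self-pipe pattern: a
   byte written to linux_event_pipe[1] wakes an event loop blocked in
   select/poll on linux_event_pipe[0].  A minimal consumer sketch,
   assuming both pipe ends are non-blocking (which the flush loop
   above already requires):

       fd_set readfds;
       FD_ZERO (&readfds);
       FD_SET (linux_event_pipe[0], &readfds);
       if (select (linux_event_pipe[0] + 1, &readfds, NULL, NULL, NULL) > 0)
	 ...  an event was marked; go collect it with the wait method.  */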
3313
3314 static ptid_t
3315 linux_wait (ptid_t ptid,
3316 struct target_waitstatus *ourstatus, int target_options)
3317 {
3318 ptid_t event_ptid;
3319
3320 /* Flush the async file first. */
3321 if (target_is_async_p ())
3322 async_file_flush ();
3323
3324 do
3325 {
3326 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3327 }
3328 while ((target_options & TARGET_WNOHANG) == 0
3329 && ptid_equal (event_ptid, null_ptid)
3330 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3331
3332 /* If at least one stop was reported, there may be more. A single
3333 SIGCHLD can signal more than one child stop. */
3334 if (target_is_async_p ()
3335 && (target_options & TARGET_WNOHANG) != 0
3336 && !ptid_equal (event_ptid, null_ptid))
3337 async_file_mark ();
3338
3339 return event_ptid;
3340 }
3341
3342 /* Send a signal to an LWP. */
3343
3344 static int
3345 kill_lwp (unsigned long lwpid, int signo)
3346 {
3347   /* Use tkill, if possible, in case we are using NPTL threads.  If tkill
3348      fails, then we are not using NPTL threads and we should be using kill.  */
3349
3350 #ifdef __NR_tkill
3351 {
3352 static int tkill_failed;
3353
3354 if (!tkill_failed)
3355 {
3356 int ret;
3357
3358 errno = 0;
3359 ret = syscall (__NR_tkill, lwpid, signo);
3360 if (errno != ENOSYS)
3361 return ret;
3362 tkill_failed = 1;
3363 }
3364 }
3365 #endif
3366
3367 return kill (lwpid, signo);
3368 }
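
/* The distinction matters because kill addresses a whole thread
   group: under NPTL all LWPs share one tgid, so a process-directed
   signal may be handled by any thread in the group, while tkill
   targets the one kernel thread.  The kill fallback is only correct
   on pre-NPTL kernels, where each thread was a separate process.  */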
3369
3370 void
3371 linux_stop_lwp (struct lwp_info *lwp)
3372 {
3373 send_sigstop (lwp);
3374 }
3375
3376 static void
3377 send_sigstop (struct lwp_info *lwp)
3378 {
3379 int pid;
3380
3381 pid = lwpid_of (get_lwp_thread (lwp));
3382
3383 /* If we already have a pending stop signal for this process, don't
3384 send another. */
3385 if (lwp->stop_expected)
3386 {
3387 if (debug_threads)
3388 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3389
3390 return;
3391 }
3392
3393 if (debug_threads)
3394 debug_printf ("Sending sigstop to lwp %d\n", pid);
3395
3396 lwp->stop_expected = 1;
3397 kill_lwp (pid, SIGSTOP);
3398 }
3399
3400 static int
3401 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3402 {
3403 struct thread_info *thread = (struct thread_info *) entry;
3404 struct lwp_info *lwp = get_thread_lwp (thread);
3405
3406 /* Ignore EXCEPT. */
3407 if (lwp == except)
3408 return 0;
3409
3410 if (lwp->stopped)
3411 return 0;
3412
3413 send_sigstop (lwp);
3414 return 0;
3415 }
3416
3417 /* Increment the suspend count of an LWP, and stop it, if not stopped
3418 yet. */
3419 static int
3420 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3421 void *except)
3422 {
3423 struct thread_info *thread = (struct thread_info *) entry;
3424 struct lwp_info *lwp = get_thread_lwp (thread);
3425
3426 /* Ignore EXCEPT. */
3427 if (lwp == except)
3428 return 0;
3429
3430 lwp->suspended++;
3431
3432 return send_sigstop_callback (entry, except);
3433 }
3434
3435 static void
3436 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3437 {
3438 /* It's dead, really. */
3439 lwp->dead = 1;
3440
3441 /* Store the exit status for later. */
3442 lwp->status_pending_p = 1;
3443 lwp->status_pending = wstat;
3444
3445 /* Prevent trying to stop it. */
3446 lwp->stopped = 1;
3447
3448 /* No further stops are expected from a dead lwp. */
3449 lwp->stop_expected = 0;
3450 }
3451
3452 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3453
3454 static void
3455 wait_for_sigstop (void)
3456 {
3457 struct thread_info *saved_thread;
3458 ptid_t saved_tid;
3459 int wstat;
3460 int ret;
3461
3462 saved_thread = current_thread;
3463 if (saved_thread != NULL)
3464 saved_tid = saved_thread->entry.id;
3465 else
3466 saved_tid = null_ptid; /* avoid bogus unused warning */
3467
3468 if (debug_threads)
3469 debug_printf ("wait_for_sigstop: pulling events\n");
3470
3471 /* Passing NULL_PTID as filter indicates we want all events to be
3472 left pending. Eventually this returns when there are no
3473 unwaited-for children left. */
3474 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3475 &wstat, __WALL);
3476 gdb_assert (ret == -1);
3477
3478 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3479 current_thread = saved_thread;
3480 else
3481 {
3482 if (debug_threads)
3483 debug_printf ("Previously current thread died.\n");
3484
3485 if (non_stop)
3486 {
3487 /* We can't change the current inferior behind GDB's back,
3488 otherwise, a subsequent command may apply to the wrong
3489 process. */
3490 current_thread = NULL;
3491 }
3492 else
3493 {
3494 /* Set a valid thread as current. */
3495 set_desired_thread (0);
3496 }
3497 }
3498 }
3499
3500 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3501 move it out, because we need to report the stop event to GDB. For
3502 example, if the user puts a breakpoint in the jump pad, it's
3503 because she wants to debug it. */
3504
3505 static int
3506 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3507 {
3508 struct thread_info *thread = (struct thread_info *) entry;
3509 struct lwp_info *lwp = get_thread_lwp (thread);
3510
3511 gdb_assert (lwp->suspended == 0);
3512 gdb_assert (lwp->stopped);
3513
3514   /* Allow debugging the jump pad, gdb_collect, etc.  */
3515 return (supports_fast_tracepoints ()
3516 && agent_loaded_p ()
3517 && (gdb_breakpoint_here (lwp->stop_pc)
3518 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3519 || thread->last_resume_kind == resume_step)
3520 && linux_fast_tracepoint_collecting (lwp, NULL));
3521 }
3522
3523 static void
3524 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3525 {
3526 struct thread_info *thread = (struct thread_info *) entry;
3527 struct lwp_info *lwp = get_thread_lwp (thread);
3528 int *wstat;
3529
3530 gdb_assert (lwp->suspended == 0);
3531 gdb_assert (lwp->stopped);
3532
3533 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3534
3535 /* Allow debugging the jump pad, gdb_collect, etc. */
3536 if (!gdb_breakpoint_here (lwp->stop_pc)
3537 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3538 && thread->last_resume_kind != resume_step
3539 && maybe_move_out_of_jump_pad (lwp, wstat))
3540 {
3541 if (debug_threads)
3542 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3543 lwpid_of (thread));
3544
3545 if (wstat)
3546 {
3547 lwp->status_pending_p = 0;
3548 enqueue_one_deferred_signal (lwp, wstat);
3549
3550 if (debug_threads)
3551 debug_printf ("Signal %d for LWP %ld deferred "
3552 "(in jump pad)\n",
3553 WSTOPSIG (*wstat), lwpid_of (thread));
3554 }
3555
3556 linux_resume_one_lwp (lwp, 0, 0, NULL);
3557 }
3558 else
3559 lwp->suspended++;
3560 }
3561
3562 static int
3563 lwp_running (struct inferior_list_entry *entry, void *data)
3564 {
3565 struct thread_info *thread = (struct thread_info *) entry;
3566 struct lwp_info *lwp = get_thread_lwp (thread);
3567
3568 if (lwp->dead)
3569 return 0;
3570 if (lwp->stopped)
3571 return 0;
3572 return 1;
3573 }
3574
3575 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3576 If SUSPEND, then also increase the suspend count of every LWP,
3577 except EXCEPT. */
3578
3579 static void
3580 stop_all_lwps (int suspend, struct lwp_info *except)
3581 {
3582 /* Should not be called recursively. */
3583 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3584
3585 if (debug_threads)
3586 {
3587 debug_enter ();
3588 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3589 suspend ? "stop-and-suspend" : "stop",
3590 except != NULL
3591 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3592 : "none");
3593 }
3594
3595 stopping_threads = (suspend
3596 ? STOPPING_AND_SUSPENDING_THREADS
3597 : STOPPING_THREADS);
3598
3599 if (suspend)
3600 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3601 else
3602 find_inferior (&all_threads, send_sigstop_callback, except);
3603 wait_for_sigstop ();
3604 stopping_threads = NOT_STOPPING_THREADS;
3605
3606 if (debug_threads)
3607 {
3608 debug_printf ("stop_all_lwps done, setting stopping_threads "
3609 "back to !stopping\n");
3610 debug_exit ();
3611 }
3612 }
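
/* The stop-all dance above, end to end: set stopping_threads so that
   linux_low_filter_event swallows the resulting SIGSTOPs instead of
   leaving them pending, send SIGSTOP to every LWP not already
   stopped (optionally bumping suspend counts), let wait_for_sigstop
   pull events with a null filter ptid until no unwaited-for children
   remain, and finally clear the flag.  */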
3613
3614 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3615 SIGNAL is nonzero, give it that signal. */
3616
3617 static void
3618 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3619 int step, int signal, siginfo_t *info)
3620 {
3621 struct thread_info *thread = get_lwp_thread (lwp);
3622 struct thread_info *saved_thread;
3623 int fast_tp_collecting;
3624
3625 if (lwp->stopped == 0)
3626 return;
3627
3628 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3629
3630 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3631
3632 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3633 user used the "jump" command, or "set $pc = foo"). */
3634 if (lwp->stop_pc != get_pc (lwp))
3635 {
3636 /* Collecting 'while-stepping' actions doesn't make sense
3637 anymore. */
3638 release_while_stepping_state_list (thread);
3639 }
3640
3641 /* If we have pending signals or status, and a new signal, enqueue the
3642 signal. Also enqueue the signal if we are waiting to reinsert a
3643 breakpoint; it will be picked up again below. */
3644 if (signal != 0
3645 && (lwp->status_pending_p
3646 || lwp->pending_signals != NULL
3647 || lwp->bp_reinsert != 0
3648 || fast_tp_collecting))
3649 {
3650 struct pending_signals *p_sig;
3651 p_sig = xmalloc (sizeof (*p_sig));
3652 p_sig->prev = lwp->pending_signals;
3653 p_sig->signal = signal;
3654 if (info == NULL)
3655 memset (&p_sig->info, 0, sizeof (siginfo_t));
3656 else
3657 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3658 lwp->pending_signals = p_sig;
3659 }
3660
3661 if (lwp->status_pending_p)
3662 {
3663 if (debug_threads)
3664 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3665 " has pending status\n",
3666 lwpid_of (thread), step ? "step" : "continue", signal,
3667 lwp->stop_expected ? "expected" : "not expected");
3668 return;
3669 }
3670
3671 saved_thread = current_thread;
3672 current_thread = thread;
3673
3674 if (debug_threads)
3675 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3676 lwpid_of (thread), step ? "step" : "continue", signal,
3677 lwp->stop_expected ? "expected" : "not expected");
3678
3679 /* This bit needs some thinking about. If we get a signal that
3680 we must report while a single-step reinsert is still pending,
3681 we often end up resuming the thread. It might be better to
3682 (ew) allow a stack of pending events; then we could be sure that
3683 the reinsert happened right away and not lose any signals.
3684
3685 Making this stack would also shrink the window in which breakpoints are
3686 uninserted (see comment in linux_wait_for_lwp) but not enough for
3687 complete correctness, so it won't solve that problem. It may be
3688 worthwhile just to solve this one, however. */
3689 if (lwp->bp_reinsert != 0)
3690 {
3691 if (debug_threads)
3692 debug_printf (" pending reinsert at 0x%s\n",
3693 paddress (lwp->bp_reinsert));
3694
3695 if (can_hardware_single_step ())
3696 {
3697 if (fast_tp_collecting == 0)
3698 {
3699 if (step == 0)
3700 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3701 if (lwp->suspended)
3702 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3703 lwp->suspended);
3704 }
3705
3706 step = 1;
3707 }
3708
3709 /* Postpone any pending signal. It was enqueued above. */
3710 signal = 0;
3711 }
3712
3713 if (fast_tp_collecting == 1)
3714 {
3715 if (debug_threads)
3716 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3717 " (exit-jump-pad-bkpt)\n",
3718 lwpid_of (thread));
3719
3720 /* Postpone any pending signal. It was enqueued above. */
3721 signal = 0;
3722 }
3723 else if (fast_tp_collecting == 2)
3724 {
3725 if (debug_threads)
3726 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3727 " single-stepping\n",
3728 lwpid_of (thread));
3729
3730 if (can_hardware_single_step ())
3731 step = 1;
3732 else
3733 {
3734 internal_error (__FILE__, __LINE__,
3735 "moving out of jump pad single-stepping"
3736 " not implemented on this target");
3737 }
3738
3739 /* Postpone any pending signal. It was enqueued above. */
3740 signal = 0;
3741 }
3742
3743 /* If we have while-stepping actions in this thread, set it stepping.
3744 If we have a signal to deliver, it may or may not be set to
3745 SIG_IGN; we don't know. Assume it is, and allow collecting
3746 while-stepping into a signal handler. A possible smart thing to
3747 do would be to set an internal breakpoint at the signal return
3748 address, continue, and carry on catching this while-stepping
3749 action only when that breakpoint is hit. A future
3750 enhancement. */
3751 if (thread->while_stepping != NULL
3752 && can_hardware_single_step ())
3753 {
3754 if (debug_threads)
3755 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3756 lwpid_of (thread));
3757 step = 1;
3758 }
3759
3760 if (the_low_target.get_pc != NULL)
3761 {
3762 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3763
3764 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3765
3766 if (debug_threads)
3767 {
3768 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3769 (long) lwp->stop_pc);
3770 }
3771 }
3772
3773 /* If we have pending signals, consume one unless we are trying to
3774 reinsert a breakpoint or we're trying to finish a fast tracepoint
3775 collect. */
3776 if (lwp->pending_signals != NULL
3777 && lwp->bp_reinsert == 0
3778 && fast_tp_collecting == 0)
3779 {
3780 struct pending_signals **p_sig;
3781
3782 p_sig = &lwp->pending_signals;
3783 while ((*p_sig)->prev != NULL)
3784 p_sig = &(*p_sig)->prev;
3785
3786 signal = (*p_sig)->signal;
3787 if ((*p_sig)->info.si_signo != 0)
3788 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3789 &(*p_sig)->info);
3790
3791 free (*p_sig);
3792 *p_sig = NULL;
3793 }
3794
3795 if (the_low_target.prepare_to_resume != NULL)
3796 the_low_target.prepare_to_resume (lwp);
3797
3798 regcache_invalidate_thread (thread);
3799 errno = 0;
3800 lwp->stepping = step;
3801 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3802 (PTRACE_TYPE_ARG3) 0,
3803 /* Coerce to a uintptr_t first to avoid potential gcc warning
3804 of coercing an 8 byte integer to a 4 byte pointer. */
3805 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3806
3807 current_thread = saved_thread;
3808 if (errno)
3809 perror_with_name ("resuming thread");
3810
3811 /* Successfully resumed. Clear state that no longer makes sense,
3812 and mark the LWP as running. We must not do this before resuming;
3813 otherwise, if that fails, other code will be confused. E.g., we'd
3814 later try to stop the LWP and hang forever waiting for a stop
3815 status. Note that we must not throw after this is cleared,
3816 otherwise handle_zombie_lwp_error would get confused. */
3817 lwp->stopped = 0;
3818 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3819 }
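/* Illustrative sketch (not part of gdbserver): the pending_signals
   list used above is pushed at the head but consumed from the tail,
   so signals are delivered in FIFO order even though the links run
   newest-to-oldest.  A minimal standalone model of that discipline,
   using only standard C; returning 0 for "empty" is a simplification,
   since real signal numbers are never 0.  */
#if 0
#include <stdlib.h>

struct sig_node { int signal; struct sig_node *prev; };

/* Push a new pending signal at the head, as the enqueue code above does.  */
static void
push_signal (struct sig_node **head, int signal)
{
  struct sig_node *p = malloc (sizeof (*p));
  p->signal = signal;
  p->prev = *head;
  *head = p;
}

/* Pop the oldest pending signal by walking to the tail, as the
   consume code above does.  */
static int
pop_oldest_signal (struct sig_node **head)
{
  struct sig_node **p = head;
  int signal;

  if (*p == NULL)
    return 0;
  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  signal = (*p)->signal;
  free (*p);
  *p = NULL;
  return signal;
}
#endif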
3820
3821 /* Called when we try to resume a stopped LWP and that errors out. If
3822 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
3823 or about to become one), discard the error, clear any pending status
3824 the LWP may have, and return true (we'll collect the exit status
3825 soon enough). Otherwise, return false. */
3826
3827 static int
3828 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3829 {
3830 struct thread_info *thread = get_lwp_thread (lp);
3831
3832 /* If we get an error after resuming the LWP successfully, we'd
3833 confuse !T state for the LWP being gone. */
3834 gdb_assert (lp->stopped);
3835
3836 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3837 because even if ptrace failed with ESRCH, the tracee may be "not
3838 yet fully dead", but already refusing ptrace requests. In that
3839 case the tracee has 'R (Running)' state for a little bit
3840 (observed in Linux 3.18). See also the note on ESRCH in the
3841 ptrace(2) man page. Instead, check whether the LWP has any state
3842 other than ptrace-stopped. */
3843
3844 /* Don't assume anything if /proc/PID/status can't be read. */
3845 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3846 {
3847 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3848 lp->status_pending_p = 0;
3849 return 1;
3850 }
3851 return 0;
3852 }
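/* Illustrative sketch (not part of gdbserver): the check above boils
   down to reading the "State:" line of /proc/PID/status and testing
   for 't' (tracing stop).  A hypothetical minimal reader, assuming
   POSIX stdio and a recent kernel -- note that old kernels reported
   'T' for both job-control and tracing stops, which is one reason the
   real code defers to linux_proc_pid_is_trace_stopped_nowarn.  */
#if 0
#include <stdio.h>
#include <string.h>

/* Return 1 if PID is in tracing stop, 0 if in any other state, and
   -1 if the status file cannot be read (thread likely gone).  */
static int
proc_pid_is_trace_stopped (int pid)
{
  char path[64], line[256];
  FILE *f;
  int result = -1;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        const char *p = line + 6;

        while (*p == ' ' || *p == '\t')
          p++;
        result = (*p == 't');
        break;
      }
  fclose (f);
  return result;
}
#endif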
3853
3854 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3855 disappears while we try to resume it. */
3856
3857 static void
3858 linux_resume_one_lwp (struct lwp_info *lwp,
3859 int step, int signal, siginfo_t *info)
3860 {
3861 TRY
3862 {
3863 linux_resume_one_lwp_throw (lwp, step, signal, info);
3864 }
3865 CATCH (ex, RETURN_MASK_ERROR)
3866 {
3867 if (!check_ptrace_stopped_lwp_gone (lwp))
3868 throw_exception (ex);
3869 }
3870 END_CATCH
3871 }
3872
3873 struct thread_resume_array
3874 {
3875 struct thread_resume *resume;
3876 size_t n;
3877 };
3878
3879 /* This function is called once per thread via find_inferior.
3880 ARG is a pointer to a thread_resume_array struct.
3881 We look up the thread specified by ENTRY in ARG, and mark the thread
3882 with a pointer to the appropriate resume request.
3883
3884 This algorithm is O(threads * resume elements), but resume elements
3885 is small (and will remain small at least until GDB supports thread
3886 suspension). */
3887
3888 static int
3889 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3890 {
3891 struct thread_info *thread = (struct thread_info *) entry;
3892 struct lwp_info *lwp = get_thread_lwp (thread);
3893 int ndx;
3894 struct thread_resume_array *r;
3895
3896 r = arg;
3897
3898 for (ndx = 0; ndx < r->n; ndx++)
3899 {
3900 ptid_t ptid = r->resume[ndx].thread;
3901 if (ptid_equal (ptid, minus_one_ptid)
3902 || ptid_equal (ptid, entry->id)
3903 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3904 of PID'. */
3905 || (ptid_get_pid (ptid) == pid_of (thread)
3906 && (ptid_is_pid (ptid)
3907 || ptid_get_lwp (ptid) == -1)))
3908 {
3909 if (r->resume[ndx].kind == resume_stop
3910 && thread->last_resume_kind == resume_stop)
3911 {
3912 if (debug_threads)
3913 debug_printf ("already %s LWP %ld at GDB's request\n",
3914 (thread->last_status.kind
3915 == TARGET_WAITKIND_STOPPED)
3916 ? "stopped"
3917 : "stopping",
3918 lwpid_of (thread));
3919
3920 continue;
3921 }
3922
3923 lwp->resume = &r->resume[ndx];
3924 thread->last_resume_kind = lwp->resume->kind;
3925
3926 lwp->step_range_start = lwp->resume->step_range_start;
3927 lwp->step_range_end = lwp->resume->step_range_end;
3928
3929 /* If we had a deferred signal to report, dequeue one now.
3930 This can happen if the LWP gets more than one signal while
3931 trying to get out of a jump pad. */
3932 if (lwp->stopped
3933 && !lwp->status_pending_p
3934 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3935 {
3936 lwp->status_pending_p = 1;
3937
3938 if (debug_threads)
3939 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3940 "leaving status pending.\n",
3941 WSTOPSIG (lwp->status_pending),
3942 lwpid_of (thread));
3943 }
3944
3945 return 0;
3946 }
3947 }
3948
3949 /* No resume action for this thread. */
3950 lwp->resume = NULL;
3951
3952 return 0;
3953 }
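/* Illustrative sketch (not part of gdbserver): the matching rules
   above accept a resume request for a thread when the request names
   all threads (-1), that exact thread, or the thread's whole process
   ('pPID' or 'pPID.-1').  A toy standalone matcher over a (pid, lwp)
   pair -- the names and the use of -1 as an explicit wildcard are
   simplifications of the real ptid_t accessors.  */
#if 0
struct toy_ptid { int pid; long lwp; };

static int
toy_ptid_matches (struct toy_ptid request, struct toy_ptid thread)
{
  if (request.pid == -1)                 /* "all threads" */
    return 1;
  if (request.pid == thread.pid
      && (request.lwp == -1              /* "all threads of PID" */
          || request.lwp == thread.lwp)) /* exactly this thread */
    return 1;
  return 0;
}
#endif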
3954
3955 /* find_inferior callback for linux_resume.
3956 Set *FLAG_P if this lwp has an interesting status pending. */
3957
3958 static int
3959 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3960 {
3961 struct thread_info *thread = (struct thread_info *) entry;
3962 struct lwp_info *lwp = get_thread_lwp (thread);
3963
3964 /* LWPs which will not be resumed are not interesting, because
3965 we might not wait for them next time through linux_wait. */
3966 if (lwp->resume == NULL)
3967 return 0;
3968
3969 if (thread_still_has_status_pending_p (thread))
3970 * (int *) flag_p = 1;
3971
3972 return 0;
3973 }
3974
3975 /* Return 1 if this lwp that GDB wants running is stopped at an
3976 internal breakpoint that we need to step over. It assumes that any
3977 required STOP_PC adjustment has already been propagated to the
3978 inferior's regcache. */
3979
3980 static int
3981 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3982 {
3983 struct thread_info *thread = (struct thread_info *) entry;
3984 struct lwp_info *lwp = get_thread_lwp (thread);
3985 struct thread_info *saved_thread;
3986 CORE_ADDR pc;
3987
3988 /* LWPs which will not be resumed are not interesting, because we
3989 might not wait for them next time through linux_wait. */
3990
3991 if (!lwp->stopped)
3992 {
3993 if (debug_threads)
3994 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3995 lwpid_of (thread));
3996 return 0;
3997 }
3998
3999 if (thread->last_resume_kind == resume_stop)
4000 {
4001 if (debug_threads)
4002 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4003 " stopped\n",
4004 lwpid_of (thread));
4005 return 0;
4006 }
4007
4008 gdb_assert (lwp->suspended >= 0);
4009
4010 if (lwp->suspended)
4011 {
4012 if (debug_threads)
4013 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4014 lwpid_of (thread));
4015 return 0;
4016 }
4017
4018 if (!lwp->need_step_over)
4019 {
4020 if (debug_threads)
4021 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
return 0;
4022 }
4023
4024 if (lwp->status_pending_p)
4025 {
4026 if (debug_threads)
4027 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4028 " status.\n",
4029 lwpid_of (thread));
4030 return 0;
4031 }
4032
4033 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4034 or we have. */
4035 pc = get_pc (lwp);
4036
4037 /* If the PC has changed since we stopped, then don't do anything,
4038 and let the breakpoint/tracepoint be hit. This happens if, for
4039 instance, GDB handled the decr_pc_after_break subtraction itself,
4040 GDB is OOL stepping this thread, or the user has issued a "jump"
4041 command, or poked the thread's registers herself. */
4042 if (pc != lwp->stop_pc)
4043 {
4044 if (debug_threads)
4045 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4046 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4047 lwpid_of (thread),
4048 paddress (lwp->stop_pc), paddress (pc));
4049
4050 lwp->need_step_over = 0;
4051 return 0;
4052 }
4053
4054 saved_thread = current_thread;
4055 current_thread = thread;
4056
4057 /* We can only step over breakpoints we know about. */
4058 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4059 {
4060 /* Don't step over a breakpoint that GDB expects to hit
4061 though. If the condition is being evaluated on the target's side
4062 and it evaluates to false, step over this breakpoint as well. */
4063 if (gdb_breakpoint_here (pc)
4064 && gdb_condition_true_at_breakpoint (pc)
4065 && gdb_no_commands_at_breakpoint (pc))
4066 {
4067 if (debug_threads)
4068 debug_printf ("Need step over [LWP %ld]? yes, but found"
4069 " GDB breakpoint at 0x%s; skipping step over\n",
4070 lwpid_of (thread), paddress (pc));
4071
4072 current_thread = saved_thread;
4073 return 0;
4074 }
4075 else
4076 {
4077 if (debug_threads)
4078 debug_printf ("Need step over [LWP %ld]? yes, "
4079 "found breakpoint at 0x%s\n",
4080 lwpid_of (thread), paddress (pc));
4081
4082 /* We've found an lwp that needs stepping over --- return 1 so
4083 that find_inferior stops looking. */
4084 current_thread = saved_thread;
4085
4086 /* If the step over is cancelled, this is set again. */
4087 lwp->need_step_over = 0;
4088 return 1;
4089 }
4090 }
4091
4092 current_thread = saved_thread;
4093
4094 if (debug_threads)
4095 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4096 " at 0x%s\n",
4097 lwpid_of (thread), paddress (pc));
4098
4099 return 0;
4100 }
4101
4102 /* Start a step-over operation on LWP. When LWP is stopped at a
4103 breakpoint, to make progress, we need to move the breakpoint out
4104 of the way. If we let other threads run while we do that, they may
4105 pass by the breakpoint location and miss hitting it. To avoid
4106 that, a step-over momentarily stops all threads while LWP is
4107 single-stepped with the breakpoint temporarily uninserted from
4108 the inferior. When the single-step finishes, we reinsert the
4109 breakpoint and let all threads that are supposed to be running
4110 run again.
4111
4112 On targets that don't support hardware single-step, we don't
4113 currently support full software single-stepping. Instead, we only
4114 support stepping over the thread event breakpoint, by asking the
4115 low target where to place a reinsert breakpoint. Since this
4116 routine assumes the breakpoint being stepped over is a thread event
4117 breakpoint, it usually assumes the return address of the current
4118 function is a good enough place to set the reinsert breakpoint. */
4119
4120 static int
4121 start_step_over (struct lwp_info *lwp)
4122 {
4123 struct thread_info *thread = get_lwp_thread (lwp);
4124 struct thread_info *saved_thread;
4125 CORE_ADDR pc;
4126 int step;
4127
4128 if (debug_threads)
4129 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4130 lwpid_of (thread));
4131
4132 stop_all_lwps (1, lwp);
4133 gdb_assert (lwp->suspended == 0);
4134
4135 if (debug_threads)
4136 debug_printf ("Done stopping all threads for step-over.\n");
4137
4138 /* Note, we should always reach here with an already adjusted PC,
4139 either by GDB (if we're resuming due to GDB's request), or by our
4140 caller, if we just finished handling an internal breakpoint GDB
4141 shouldn't care about. */
4142 pc = get_pc (lwp);
4143
4144 saved_thread = current_thread;
4145 current_thread = thread;
4146
4147 lwp->bp_reinsert = pc;
4148 uninsert_breakpoints_at (pc);
4149 uninsert_fast_tracepoint_jumps_at (pc);
4150
4151 if (can_hardware_single_step ())
4152 {
4153 step = 1;
4154 }
4155 else
4156 {
4157 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4158 set_reinsert_breakpoint (raddr);
4159 step = 0;
4160 }
4161
4162 current_thread = saved_thread;
4163
4164 linux_resume_one_lwp (lwp, step, 0, NULL);
4165
4166 /* Require next event from this LWP. */
4167 step_over_bkpt = thread->entry.id;
4168 return 1;
4169 }
4170
4171 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4172 start_step_over, if still there, and delete any reinsert
4173 breakpoints we've set, on non-hardware single-step targets. */
4174
4175 static int
4176 finish_step_over (struct lwp_info *lwp)
4177 {
4178 if (lwp->bp_reinsert != 0)
4179 {
4180 if (debug_threads)
4181 debug_printf ("Finished step over.\n");
4182
4183 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4184 may be no breakpoint to reinsert there by now. */
4185 reinsert_breakpoints_at (lwp->bp_reinsert);
4186 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4187
4188 lwp->bp_reinsert = 0;
4189
4190 /* Delete any software-single-step reinsert breakpoints. No
4191 longer needed. We don't have to worry about other threads
4192 hitting this trap, and later not being able to explain it,
4193 because we were stepping over a breakpoint, and we hold all
4194 threads but LWP stopped while doing that. */
4195 if (!can_hardware_single_step ())
4196 delete_reinsert_breakpoints ();
4197
4198 step_over_bkpt = null_ptid;
4199 return 1;
4200 }
4201 else
4202 return 0;
4203 }
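/* Illustrative sketch (not part of gdbserver): the overall shape of
   one step-over, as orchestrated by start_step_over and
   finish_step_over above.  Every name below is a hypothetical
   stand-in, stubbed out so the sequence itself is the whole
   content.  */
#if 0
static void stop_everyone (void) {}         /* stop_all_lwps */
static void uninsert_breakpoint (void) {}   /* uninsert_breakpoints_at */
static void single_step_and_wait (void) {}  /* linux_resume_one_lwp + wait */
static void reinsert_breakpoint (void) {}   /* reinsert_breakpoints_at */
static void resume_everyone (void) {}       /* unstop_all_lwps */

static void
step_over_once (void)
{
  stop_everyone ();
  uninsert_breakpoint ();
  single_step_and_wait ();
  reinsert_breakpoint ();
  resume_everyone ();
}
#endif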
4204
4205 /* This function is called once per thread. We check the thread's resume
4206 request, which will tell us whether to resume, step, or leave the thread
4207 stopped; and what signal, if any, it should be sent.
4208
4209 For threads about which we aren't explicitly told otherwise, we preserve
4210 the stepping flag; this is used for stepping over gdbserver-placed
4211 breakpoints.
4212
4213 If pending_flags was set in any thread, we queue any needed
4214 signals, since we won't actually resume. We already have a pending
4215 event to report, so we don't need to preserve any step requests;
4216 they should be re-issued if necessary. */
4217
4218 static int
4219 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4220 {
4221 struct thread_info *thread = (struct thread_info *) entry;
4222 struct lwp_info *lwp = get_thread_lwp (thread);
4223 int step;
4224 int leave_all_stopped = * (int *) arg;
4225 int leave_pending;
4226
4227 if (lwp->resume == NULL)
4228 return 0;
4229
4230 if (lwp->resume->kind == resume_stop)
4231 {
4232 if (debug_threads)
4233 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4234
4235 if (!lwp->stopped)
4236 {
4237 if (debug_threads)
4238 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4239
4240 /* Stop the thread, and wait for the event asynchronously,
4241 through the event loop. */
4242 send_sigstop (lwp);
4243 }
4244 else
4245 {
4246 if (debug_threads)
4247 debug_printf ("already stopped LWP %ld\n",
4248 lwpid_of (thread));
4249
4250 /* The LWP may have been stopped in an internal event that
4251 was not meant to be notified back to GDB (e.g., gdbserver
4252 breakpoint), so we should be reporting a stop event in
4253 this case too. */
4254
4255 /* If the thread already has a pending SIGSTOP, this is a
4256 no-op. Otherwise, something later will presumably resume
4257 the thread and this will cause it to cancel any pending
4258 operation, due to last_resume_kind == resume_stop. If
4259 the thread already has a pending status to report, we
4260 will still report it the next time we wait - see
4261 status_pending_p_callback. */
4262
4263 /* If we already have a pending signal to report, then
4264 there's no need to queue a SIGSTOP, as this means we're
4265 midway through moving the LWP out of the jumppad, and we
4266 will report the pending signal as soon as that is
4267 finished. */
4268 if (lwp->pending_signals_to_report == NULL)
4269 send_sigstop (lwp);
4270 }
4271
4272 /* For stop requests, we're done. */
4273 lwp->resume = NULL;
4274 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4275 return 0;
4276 }
4277
4278 /* If this thread which is about to be resumed has a pending status,
4279 then don't resume any threads - we can just report the pending
4280 status. Make sure to queue any signals that would otherwise be
4281 sent. In all-stop mode, we make this decision based on whether *any*
4282 thread has a pending status. If there's a thread that needs the
4283 step-over-breakpoint dance, then don't resume any other thread
4284 but that particular one. */
4285 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4286
4287 if (!leave_pending)
4288 {
4289 if (debug_threads)
4290 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4291
4292 step = (lwp->resume->kind == resume_step);
4293 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4294 }
4295 else
4296 {
4297 if (debug_threads)
4298 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4299
4300 /* If we have a new signal, enqueue the signal. */
4301 if (lwp->resume->sig != 0)
4302 {
4303 struct pending_signals *p_sig;
4304 p_sig = xmalloc (sizeof (*p_sig));
4305 p_sig->prev = lwp->pending_signals;
4306 p_sig->signal = lwp->resume->sig;
4307 memset (&p_sig->info, 0, sizeof (siginfo_t));
4308
4309 /* If this is the same signal we were previously stopped by,
4310 make sure to queue its siginfo. We can ignore the return
4311 value of ptrace; if it fails, we'll skip
4312 PTRACE_SETSIGINFO. */
4313 if (WIFSTOPPED (lwp->last_status)
4314 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4315 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4316 &p_sig->info);
4317
4318 lwp->pending_signals = p_sig;
4319 }
4320 }
4321
4322 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4323 lwp->resume = NULL;
4324 return 0;
4325 }
4326
4327 static void
4328 linux_resume (struct thread_resume *resume_info, size_t n)
4329 {
4330 struct thread_resume_array array = { resume_info, n };
4331 struct thread_info *need_step_over = NULL;
4332 int any_pending;
4333 int leave_all_stopped;
4334
4335 if (debug_threads)
4336 {
4337 debug_enter ();
4338 debug_printf ("linux_resume:\n");
4339 }
4340
4341 find_inferior (&all_threads, linux_set_resume_request, &array);
4342
4343 /* If there is a thread which would otherwise be resumed, which has
4344 a pending status, then don't resume any threads - we can just
4345 report the pending status. Make sure to queue any signals that
4346 would otherwise be sent. In non-stop mode, we'll apply this
4347 logic to each thread individually. We consume all pending events
4348 before considering to start a step-over (in all-stop). */
4349 any_pending = 0;
4350 if (!non_stop)
4351 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4352
4353 /* If there is a thread which would otherwise be resumed, which is
4354 stopped at a breakpoint that needs stepping over, then don't
4355 resume any threads - have it step over the breakpoint with all
4356 other threads stopped, then resume all threads again. Make sure
4357 to queue any signals that would otherwise be delivered or
4358 queued. */
4359 if (!any_pending && supports_breakpoints ())
4360 need_step_over
4361 = (struct thread_info *) find_inferior (&all_threads,
4362 need_step_over_p, NULL);
4363
4364 leave_all_stopped = (need_step_over != NULL || any_pending);
4365
4366 if (debug_threads)
4367 {
4368 if (need_step_over != NULL)
4369 debug_printf ("Not resuming all, need step over\n");
4370 else if (any_pending)
4371 debug_printf ("Not resuming, all-stop and found "
4372 "an LWP with pending status\n");
4373 else
4374 debug_printf ("Resuming, no pending status or step over needed\n");
4375 }
4376
4377 /* Even if we're leaving threads stopped, queue all signals we'd
4378 otherwise deliver. */
4379 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4380
4381 if (need_step_over)
4382 start_step_over (get_thread_lwp (need_step_over));
4383
4384 if (debug_threads)
4385 {
4386 debug_printf ("linux_resume done\n");
4387 debug_exit ();
4388 }
4389 }
4390
4391 /* This function is called once per thread. We check the thread's
4392 last resume request, which will tell us whether to resume, step, or
4393 leave the thread stopped. Any signal the client requested to be
4394 delivered has already been enqueued at this point.
4395
4396 If any thread that GDB wants running is stopped at an internal
4397 breakpoint that needs stepping over, we start a step-over operation
4398 on that particular thread, and leave all others stopped. */
4399
4400 static int
4401 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4402 {
4403 struct thread_info *thread = (struct thread_info *) entry;
4404 struct lwp_info *lwp = get_thread_lwp (thread);
4405 int step;
4406
4407 if (lwp == except)
4408 return 0;
4409
4410 if (debug_threads)
4411 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4412
4413 if (!lwp->stopped)
4414 {
4415 if (debug_threads)
4416 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4417 return 0;
4418 }
4419
4420 if (thread->last_resume_kind == resume_stop
4421 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4422 {
4423 if (debug_threads)
4424 debug_printf (" client wants LWP to remain %ld stopped\n",
4425 lwpid_of (thread));
4426 return 0;
4427 }
4428
4429 if (lwp->status_pending_p)
4430 {
4431 if (debug_threads)
4432 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4433 lwpid_of (thread));
4434 return 0;
4435 }
4436
4437 gdb_assert (lwp->suspended >= 0);
4438
4439 if (lwp->suspended)
4440 {
4441 if (debug_threads)
4442 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4443 return 0;
4444 }
4445
4446 if (thread->last_resume_kind == resume_stop
4447 && lwp->pending_signals_to_report == NULL
4448 && lwp->collecting_fast_tracepoint == 0)
4449 {
4450 /* We haven't reported this LWP as stopped yet (otherwise, the
4451 last_status.kind check above would catch it, and we wouldn't
4452 reach here). This LWP may have been momentarily paused by a
4453 stop_all_lwps call while handling, for example, another LWP's
4454 step-over. In that case, the pending expected SIGSTOP signal
4455 that was queued at vCont;t handling time will have already
4456 been consumed by wait_for_sigstop, and so we need to requeue
4457 another one here. Note that if the LWP already has a SIGSTOP
4458 pending, this is a no-op. */
4459
4460 if (debug_threads)
4461 debug_printf ("Client wants LWP %ld to stop. "
4462 "Making sure it has a SIGSTOP pending\n",
4463 lwpid_of (thread));
4464
4465 send_sigstop (lwp);
4466 }
4467
4468 step = thread->last_resume_kind == resume_step;
4469 linux_resume_one_lwp (lwp, step, 0, NULL);
4470 return 0;
4471 }
4472
4473 static int
4474 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4475 {
4476 struct thread_info *thread = (struct thread_info *) entry;
4477 struct lwp_info *lwp = get_thread_lwp (thread);
4478
4479 if (lwp == except)
4480 return 0;
4481
4482 lwp->suspended--;
4483 gdb_assert (lwp->suspended >= 0);
4484
4485 return proceed_one_lwp (entry, except);
4486 }
4487
4488 /* When we finish a step-over, set threads running again. If there's
4489 another thread that may need a step-over, now's the time to start
4490 it. Eventually, we'll move all threads past their breakpoints. */
4491
4492 static void
4493 proceed_all_lwps (void)
4494 {
4495 struct thread_info *need_step_over;
4496
4497 /* If there is a thread which would otherwise be resumed, which is
4498 stopped at a breakpoint that needs stepping over, then don't
4499 resume any threads - have it step over the breakpoint with all
4500 other threads stopped, then resume all threads again. */
4501
4502 if (supports_breakpoints ())
4503 {
4504 need_step_over
4505 = (struct thread_info *) find_inferior (&all_threads,
4506 need_step_over_p, NULL);
4507
4508 if (need_step_over != NULL)
4509 {
4510 if (debug_threads)
4511 debug_printf ("proceed_all_lwps: found "
4512 "thread %ld needing a step-over\n",
4513 lwpid_of (need_step_over));
4514
4515 start_step_over (get_thread_lwp (need_step_over));
4516 return;
4517 }
4518 }
4519
4520 if (debug_threads)
4521 debug_printf ("Proceeding, no step-over needed\n");
4522
4523 find_inferior (&all_threads, proceed_one_lwp, NULL);
4524 }
4525
4526 /* Stopped LWPs that the client wanted to be running and that don't
4527 have pending statuses are set to run again, except for EXCEPT, if
4528 not NULL. This undoes a stop_all_lwps call. */
4529
4530 static void
4531 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4532 {
4533 if (debug_threads)
4534 {
4535 debug_enter ();
4536 if (except)
4537 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4538 lwpid_of (get_lwp_thread (except)));
4539 else
4540 debug_printf ("unstopping all lwps\n");
4541 }
4542
4543 if (unsuspend)
4544 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4545 else
4546 find_inferior (&all_threads, proceed_one_lwp, except);
4547
4548 if (debug_threads)
4549 {
4550 debug_printf ("unstop_all_lwps done\n");
4551 debug_exit ();
4552 }
4553 }
4554
4555
4556 #ifdef HAVE_LINUX_REGSETS
4557
4558 #define use_linux_regsets 1
4559
4560 /* Returns true if REGSET has been disabled. */
4561
4562 static int
4563 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4564 {
4565 return (info->disabled_regsets != NULL
4566 && info->disabled_regsets[regset - info->regsets]);
4567 }
4568
4569 /* Disable REGSET. */
4570
4571 static void
4572 disable_regset (struct regsets_info *info, struct regset_info *regset)
4573 {
4574 int dr_offset;
4575
4576 dr_offset = regset - info->regsets;
4577 if (info->disabled_regsets == NULL)
4578 info->disabled_regsets = xcalloc (1, info->num_regsets);
4579 info->disabled_regsets[dr_offset] = 1;
4580 }
4581
4582 static int
4583 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4584 struct regcache *regcache)
4585 {
4586 struct regset_info *regset;
4587 int saw_general_regs = 0;
4588 int pid;
4589 struct iovec iov;
4590
4591 pid = lwpid_of (current_thread);
4592 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4593 {
4594 void *buf, *data;
4595 int nt_type, res;
4596
4597 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4598 continue;
4599
4600 buf = xmalloc (regset->size);
4601
4602 nt_type = regset->nt_type;
4603 if (nt_type)
4604 {
4605 iov.iov_base = buf;
4606 iov.iov_len = regset->size;
4607 data = (void *) &iov;
4608 }
4609 else
4610 data = buf;
4611
4612 #ifndef __sparc__
4613 res = ptrace (regset->get_request, pid,
4614 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4615 #else
4616 res = ptrace (regset->get_request, pid, data, nt_type);
4617 #endif
4618 if (res < 0)
4619 {
4620 if (errno == EIO)
4621 {
4622 /* If we get EIO on a regset, do not try it again for
4623 this process mode. */
4624 disable_regset (regsets_info, regset);
4625 }
4626 else if (errno == ENODATA)
4627 {
4628 /* ENODATA may be returned if the regset is currently
4629 not "active". This can happen in normal operation,
4630 so suppress the warning in this case. */
4631 }
4632 else
4633 {
4634 char s[256];
4635 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4636 pid);
4637 perror (s);
4638 }
4639 }
4640 else
4641 {
4642 if (regset->type == GENERAL_REGS)
4643 saw_general_regs = 1;
4644 regset->store_function (regcache, buf);
4645 }
4646 free (buf);
4647 }
4648 if (saw_general_regs)
4649 return 0;
4650 else
4651 return 1;
4652 }
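/* Illustrative sketch (not part of gdbserver): for regsets with an
   NT_* type, the kernel expects a struct iovec describing the buffer,
   via PTRACE_GETREGSET (Linux >= 2.6.34).  A minimal standalone fetch
   of the general-purpose regset; x86-64 is assumed for the choice of
   user_regs_struct.  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
fetch_gregs (int pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* On success the kernel shrinks iov.iov_len to the size written.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif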
4653
4654 static int
4655 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4656 struct regcache *regcache)
4657 {
4658 struct regset_info *regset;
4659 int saw_general_regs = 0;
4660 int pid;
4661 struct iovec iov;
4662
4663 pid = lwpid_of (current_thread);
4664 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4665 {
4666 void *buf, *data;
4667 int nt_type, res;
4668
4669 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4670 || regset->fill_function == NULL)
4671 continue;
4672
4673 buf = xmalloc (regset->size);
4674
4675 /* First fill the buffer with the current register set contents,
4676 in case there are any items in the kernel's regset that are
4677 not in gdbserver's regcache. */
4678
4679 nt_type = regset->nt_type;
4680 if (nt_type)
4681 {
4682 iov.iov_base = buf;
4683 iov.iov_len = regset->size;
4684 data = (void *) &iov;
4685 }
4686 else
4687 data = buf;
4688
4689 #ifndef __sparc__
4690 res = ptrace (regset->get_request, pid,
4691 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4692 #else
4693 res = ptrace (regset->get_request, pid, data, nt_type);
4694 #endif
4695
4696 if (res == 0)
4697 {
4698 /* Then overlay our cached registers on that. */
4699 regset->fill_function (regcache, buf);
4700
4701 /* Only now do we write the register set. */
4702 #ifndef __sparc__
4703 res = ptrace (regset->set_request, pid,
4704 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4705 #else
4706 res = ptrace (regset->set_request, pid, data, nt_type);
4707 #endif
4708 }
4709
4710 if (res < 0)
4711 {
4712 if (errno == EIO)
4713 {
4714 /* If we get EIO on a regset, do not try it again for
4715 this process mode. */
4716 disable_regset (regsets_info, regset);
4717 }
4718 else if (errno == ESRCH)
4719 {
4720 /* At this point, ESRCH should mean the process is
4721 already gone, in which case we simply ignore attempts
4722 to change its registers. See also the related
4723 comment in linux_resume_one_lwp. */
4724 free (buf);
4725 return 0;
4726 }
4727 else
4728 {
4729 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4730 }
4731 }
4732 else if (regset->type == GENERAL_REGS)
4733 saw_general_regs = 1;
4734 free (buf);
4735 }
4736 if (saw_general_regs)
4737 return 0;
4738 else
4739 return 1;
4740 }
4741
4742 #else /* !HAVE_LINUX_REGSETS */
4743
4744 #define use_linux_regsets 0
4745 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4746 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4747
4748 #endif
4749
4750 /* Return 1 if register REGNO is supported by one of the regset ptrace
4751 calls or 0 if it has to be transferred individually. */
4752
4753 static int
4754 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4755 {
4756 unsigned char mask = 1 << (regno % 8);
4757 size_t index = regno / 8;
4758
4759 return (use_linux_regsets
4760 && (regs_info->regset_bitmap == NULL
4761 || (regs_info->regset_bitmap[index] & mask) != 0));
4762 }
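/* Worked example for the bitmap test above: for regno 13, index is
   13 / 8 = 1 and mask is 1 << (13 % 8) = 0x20, so the check reads bit
   5 of byte 1 of regset_bitmap.  A NULL bitmap means every register
   is covered by some regset.  */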
4763
4764 #ifdef HAVE_LINUX_USRREGS
4765
4766 int
4767 register_addr (const struct usrregs_info *usrregs, int regnum)
4768 {
4769 int addr;
4770
4771 if (regnum < 0 || regnum >= usrregs->num_regs)
4772 error ("Invalid register number %d.", regnum);
4773
4774 addr = usrregs->regmap[regnum];
4775
4776 return addr;
4777 }
4778
4779 /* Fetch one register. */
4780 static void
4781 fetch_register (const struct usrregs_info *usrregs,
4782 struct regcache *regcache, int regno)
4783 {
4784 CORE_ADDR regaddr;
4785 int i, size;
4786 char *buf;
4787 int pid;
4788
4789 if (regno >= usrregs->num_regs)
4790 return;
4791 if ((*the_low_target.cannot_fetch_register) (regno))
4792 return;
4793
4794 regaddr = register_addr (usrregs, regno);
4795 if (regaddr == -1)
4796 return;
4797
4798 size = ((register_size (regcache->tdesc, regno)
4799 + sizeof (PTRACE_XFER_TYPE) - 1)
4800 & -sizeof (PTRACE_XFER_TYPE));
4801 buf = alloca (size);
4802
4803 pid = lwpid_of (current_thread);
4804 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4805 {
4806 errno = 0;
4807 *(PTRACE_XFER_TYPE *) (buf + i) =
4808 ptrace (PTRACE_PEEKUSER, pid,
4809 /* Coerce to a uintptr_t first to avoid potential gcc warning
4810 of coercing an 8 byte integer to a 4 byte pointer. */
4811 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4812 regaddr += sizeof (PTRACE_XFER_TYPE);
4813 if (errno != 0)
4814 error ("reading register %d: %s", regno, strerror (errno));
4815 }
4816
4817 if (the_low_target.supply_ptrace_register)
4818 the_low_target.supply_ptrace_register (regcache, regno, buf);
4819 else
4820 supply_register (regcache, regno, buf);
4821 }
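/* Worked example for the size rounding above: with an 8-byte
   PTRACE_XFER_TYPE, a 10-byte register rounds up to
   (10 + 8 - 1) & -8 = 16, so the loop issues two ptrace peeks to
   fill the buffer.  */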
4822
4823 /* Store one register. */
4824 static void
4825 store_register (const struct usrregs_info *usrregs,
4826 struct regcache *regcache, int regno)
4827 {
4828 CORE_ADDR regaddr;
4829 int i, size;
4830 char *buf;
4831 int pid;
4832
4833 if (regno >= usrregs->num_regs)
4834 return;
4835 if ((*the_low_target.cannot_store_register) (regno))
4836 return;
4837
4838 regaddr = register_addr (usrregs, regno);
4839 if (regaddr == -1)
4840 return;
4841
4842 size = ((register_size (regcache->tdesc, regno)
4843 + sizeof (PTRACE_XFER_TYPE) - 1)
4844 & -sizeof (PTRACE_XFER_TYPE));
4845 buf = alloca (size);
4846 memset (buf, 0, size);
4847
4848 if (the_low_target.collect_ptrace_register)
4849 the_low_target.collect_ptrace_register (regcache, regno, buf);
4850 else
4851 collect_register (regcache, regno, buf);
4852
4853 pid = lwpid_of (current_thread);
4854 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4855 {
4856 errno = 0;
4857 ptrace (PTRACE_POKEUSER, pid,
4858 /* Coerce to a uintptr_t first to avoid potential gcc warning
4859 about coercing an 8 byte integer to a 4 byte pointer. */
4860 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4861 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4862 if (errno != 0)
4863 {
4864 /* At this point, ESRCH should mean the process is
4865 already gone, in which case we simply ignore attempts
4866 to change its registers. See also the related
4867 comment in linux_resume_one_lwp. */
4868 if (errno == ESRCH)
4869 return;
4870
4871 if ((*the_low_target.cannot_store_register) (regno) == 0)
4872 error ("writing register %d: %s", regno, strerror (errno));
4873 }
4874 regaddr += sizeof (PTRACE_XFER_TYPE);
4875 }
4876 }
4877
4878 /* Fetch all registers, or just one, from the child process.
4879 If REGNO is -1, do this for all registers, skipping any that are
4880 assumed to have been retrieved by regsets_fetch_inferior_registers,
4881 unless ALL is non-zero.
4882 Otherwise, REGNO specifies which register (so we can save time). */
4883 static void
4884 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4885 struct regcache *regcache, int regno, int all)
4886 {
4887 struct usrregs_info *usr = regs_info->usrregs;
4888
4889 if (regno == -1)
4890 {
4891 for (regno = 0; regno < usr->num_regs; regno++)
4892 if (all || !linux_register_in_regsets (regs_info, regno))
4893 fetch_register (usr, regcache, regno);
4894 }
4895 else
4896 fetch_register (usr, regcache, regno);
4897 }
4898
4899 /* Store our register values back into the inferior.
4900 If REGNO is -1, do this for all registers, skipping any that are
4901 assumed to have been saved by regsets_store_inferior_registers,
4902 unless ALL is non-zero.
4903 Otherwise, REGNO specifies which register (so we can save time). */
4904 static void
4905 usr_store_inferior_registers (const struct regs_info *regs_info,
4906 struct regcache *regcache, int regno, int all)
4907 {
4908 struct usrregs_info *usr = regs_info->usrregs;
4909
4910 if (regno == -1)
4911 {
4912 for (regno = 0; regno < usr->num_regs; regno++)
4913 if (all || !linux_register_in_regsets (regs_info, regno))
4914 store_register (usr, regcache, regno);
4915 }
4916 else
4917 store_register (usr, regcache, regno);
4918 }
4919
4920 #else /* !HAVE_LINUX_USRREGS */
4921
4922 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4923 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4924
4925 #endif
4926
4927
4928 void
4929 linux_fetch_registers (struct regcache *regcache, int regno)
4930 {
4931 int use_regsets;
4932 int all = 0;
4933 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4934
4935 if (regno == -1)
4936 {
4937 if (the_low_target.fetch_register != NULL
4938 && regs_info->usrregs != NULL)
4939 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4940 (*the_low_target.fetch_register) (regcache, regno);
4941
4942 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4943 if (regs_info->usrregs != NULL)
4944 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4945 }
4946 else
4947 {
4948 if (the_low_target.fetch_register != NULL
4949 && (*the_low_target.fetch_register) (regcache, regno))
4950 return;
4951
4952 use_regsets = linux_register_in_regsets (regs_info, regno);
4953 if (use_regsets)
4954 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4955 regcache);
4956 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4957 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4958 }
4959 }
4960
4961 void
4962 linux_store_registers (struct regcache *regcache, int regno)
4963 {
4964 int use_regsets;
4965 int all = 0;
4966 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4967
4968 if (regno == -1)
4969 {
4970 all = regsets_store_inferior_registers (regs_info->regsets_info,
4971 regcache);
4972 if (regs_info->usrregs != NULL)
4973 usr_store_inferior_registers (regs_info, regcache, regno, all);
4974 }
4975 else
4976 {
4977 use_regsets = linux_register_in_regsets (regs_info, regno);
4978 if (use_regsets)
4979 all = regsets_store_inferior_registers (regs_info->regsets_info,
4980 regcache);
4981 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4982 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4983 }
4984 }
4985
4986
4987 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4988 to debugger memory starting at MYADDR. */
4989
4990 static int
4991 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4992 {
4993 int pid = lwpid_of (current_thread);
4994 register PTRACE_XFER_TYPE *buffer;
4995 register CORE_ADDR addr;
4996 register int count;
4997 char filename[64];
4998 register int i;
4999 int ret;
5000 int fd;
5001
5002 /* Try using /proc. Don't bother for short reads of a word or two. */
5003 if (len >= 3 * sizeof (long))
5004 {
5005 int bytes;
5006
5007 /* We could keep this file open and cache it - possibly one per
5008 thread. That requires some juggling, but is even faster. */
5009 sprintf (filename, "/proc/%d/mem", pid);
5010 fd = open (filename, O_RDONLY | O_LARGEFILE);
5011 if (fd == -1)
5012 goto no_proc;
5013
5014 /* If pread64 is available, use it. It's faster if the kernel
5015 supports it (only one syscall), and it's 64-bit safe even on
5016 32-bit platforms (for instance, SPARC debugging a SPARC64
5017 application). */
5018 #ifdef HAVE_PREAD64
5019 bytes = pread64 (fd, myaddr, len, memaddr);
5020 #else
5021 bytes = -1;
5022 if (lseek (fd, memaddr, SEEK_SET) != -1)
5023 bytes = read (fd, myaddr, len);
5024 #endif
5025
5026 close (fd);
5027 if (bytes == len)
5028 return 0;
5029
5030 /* Some data was read, we'll try to get the rest with ptrace. */
5031 if (bytes > 0)
5032 {
5033 memaddr += bytes;
5034 myaddr += bytes;
5035 len -= bytes;
5036 }
5037 }
5038
5039 no_proc:
5040 /* Round starting address down to longword boundary. */
5041 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5042 /* Round ending address up; get number of longwords that makes. */
5043 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5044 / sizeof (PTRACE_XFER_TYPE));
5045 /* Allocate buffer of that many longwords. */
5046 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5047
5048 /* Read all the longwords */
5049 errno = 0;
5050 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5051 {
5052 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5053 about coercing an 8 byte integer to a 4 byte pointer. */
5054 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5055 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5056 (PTRACE_TYPE_ARG4) 0);
5057 if (errno)
5058 break;
5059 }
5060 ret = errno;
5061
5062 /* Copy appropriate bytes out of the buffer. */
5063 if (i > 0)
5064 {
5065 i *= sizeof (PTRACE_XFER_TYPE);
5066 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5067 memcpy (myaddr,
5068 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5069 i < len ? i : len);
5070 }
5071
5072 return ret;
5073 }
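/* Illustrative sketch (not part of gdbserver): the fast path above in
   miniature -- reading inferior memory through /proc/PID/mem.  Assumes
   only POSIX; the target must already be ptrace-stopped or the read is
   refused.  The real code prefers pread64 where available, which this
   sketch glosses over by using plain pread.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_inferior_mem (int pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof (path), "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif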
5074
5075 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5076 memory at MEMADDR. On failure (cannot write to the inferior)
5077 returns the value of errno. Always succeeds if LEN is zero. */
5078
5079 static int
5080 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5081 {
5082 register int i;
5083 /* Round starting address down to longword boundary. */
5084 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5085 /* Round ending address up; get number of longwords that makes. */
5086 register int count
5087 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5088 / sizeof (PTRACE_XFER_TYPE);
5089
5090 /* Allocate buffer of that many longwords. */
5091 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5092 alloca (count * sizeof (PTRACE_XFER_TYPE));
5093
5094 int pid = lwpid_of (current_thread);
5095
5096 if (len == 0)
5097 {
5098 /* Zero length write always succeeds. */
5099 return 0;
5100 }
5101
5102 if (debug_threads)
5103 {
5104 /* Dump up to four bytes. */
5105 unsigned int val = * (unsigned int *) myaddr;
5106 if (len == 1)
5107 val = val & 0xff;
5108 else if (len == 2)
5109 val = val & 0xffff;
5110 else if (len == 3)
5111 val = val & 0xffffff;
5112 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5113 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5114 }
5115
5116 /* Fill start and end extra bytes of buffer with existing memory data. */
5117
5118 errno = 0;
5119 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5120 about coercing an 8 byte integer to a 4 byte pointer. */
5121 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5122 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5123 (PTRACE_TYPE_ARG4) 0);
5124 if (errno)
5125 return errno;
5126
5127 if (count > 1)
5128 {
5129 errno = 0;
5130 buffer[count - 1]
5131 = ptrace (PTRACE_PEEKTEXT, pid,
5132 /* Coerce to a uintptr_t first to avoid potential gcc warning
5133 about coercing an 8 byte integer to a 4 byte pointer. */
5134 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5135 * sizeof (PTRACE_XFER_TYPE)),
5136 (PTRACE_TYPE_ARG4) 0);
5137 if (errno)
5138 return errno;
5139 }
5140
5141 /* Copy data to be written over corresponding part of buffer. */
5142
5143 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5144 myaddr, len);
5145
5146 /* Write the entire buffer. */
5147
5148 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5149 {
5150 errno = 0;
5151 ptrace (PTRACE_POKETEXT, pid,
5152 /* Coerce to a uintptr_t first to avoid potential gcc warning
5153 about coercing an 8 byte integer to a 4 byte pointer. */
5154 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5155 (PTRACE_TYPE_ARG4) buffer[i]);
5156 if (errno)
5157 return errno;
5158 }
5159
5160 return 0;
5161 }
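/* Worked example for the rounding above: with an 8-byte
   PTRACE_XFER_TYPE, memaddr = 0x1003 and len = 6 give addr = 0x1000
   and count = ((0x1009 - 0x1000) + 7) / 8 = 2; two words are peeked,
   the six bytes at offsets 3..8 of the buffer are overwritten, and
   both words are poked back.  */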
5162
5163 static void
5164 linux_look_up_symbols (void)
5165 {
5166 #ifdef USE_THREAD_DB
5167 struct process_info *proc = current_process ();
5168
5169 if (proc->priv->thread_db != NULL)
5170 return;
5171
5172 /* If the kernel supports tracing clones, then we don't need to
5173 use the magic thread event breakpoint to learn about
5174 threads. */
5175 thread_db_init (!linux_supports_traceclone ());
5176 #endif
5177 }
5178
5179 static void
5180 linux_request_interrupt (void)
5181 {
5182 extern unsigned long signal_pid;
5183
5184 /* Send a SIGINT to the process group. This acts just as if the user
5185 had typed a ^C on the controlling terminal. */
5186 kill (-signal_pid, SIGINT);
5187 }
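/* Worked example for the negative-pid convention above: assuming the
   inferior was made a process-group leader at startup, a call such as
   kill (-1234, SIGINT) delivers SIGINT to every member of process
   group 1234, mirroring what the tty layer does for ^C.  */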
5188
5189 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5190 to debugger memory starting at MYADDR. */
5191
5192 static int
5193 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5194 {
5195 char filename[PATH_MAX];
5196 int fd, n;
5197 int pid = lwpid_of (current_thread);
5198
5199 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5200
5201 fd = open (filename, O_RDONLY);
5202 if (fd < 0)
5203 return -1;
5204
5205 if (offset != (CORE_ADDR) 0
5206 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5207 n = -1;
5208 else
5209 n = read (fd, myaddr, len);
5210
5211 close (fd);
5212
5213 return n;
5214 }
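/* Illustrative sketch (not part of gdbserver): the auxv data read
   above is an array of (a_type, a_un.a_val) pairs terminated by an
   AT_NULL entry.  A hypothetical scan for one entry, assuming a
   64-bit inferior and glibc's <elf.h>.  */
#if 0
#include <elf.h>
#include <stdint.h>
#include <string.h>

static uint64_t
auxv_find (const unsigned char *auxv, size_t len, uint64_t type)
{
  size_t i;

  for (i = 0; i + sizeof (Elf64_auxv_t) <= len; i += sizeof (Elf64_auxv_t))
    {
      Elf64_auxv_t entry;

      memcpy (&entry, auxv + i, sizeof (entry));
      if (entry.a_type == AT_NULL)
        break;
      if (entry.a_type == type)
        return entry.a_un.a_val;
    }
  return 0;
}
#endif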
5215
5216 /* These breakpoint- and watchpoint-related wrapper functions simply
5217 pass on the function call if the target has registered a
5218 corresponding function. */
5219
5220 static int
5221 linux_supports_z_point_type (char z_type)
5222 {
5223 return (the_low_target.supports_z_point_type != NULL
5224 && the_low_target.supports_z_point_type (z_type));
5225 }
5226
5227 static int
5228 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5229 int size, struct raw_breakpoint *bp)
5230 {
5231 if (type == raw_bkpt_type_sw)
5232 return insert_memory_breakpoint (bp);
5233 else if (the_low_target.insert_point != NULL)
5234 return the_low_target.insert_point (type, addr, size, bp);
5235 else
5236 /* Unsupported (see target.h). */
5237 return 1;
5238 }
5239
5240 static int
5241 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5242 int size, struct raw_breakpoint *bp)
5243 {
5244 if (type == raw_bkpt_type_sw)
5245 return remove_memory_breakpoint (bp);
5246 else if (the_low_target.remove_point != NULL)
5247 return the_low_target.remove_point (type, addr, size, bp);
5248 else
5249 /* Unsupported (see target.h). */
5250 return 1;
5251 }
5252
5253 /* Implement the to_stopped_by_sw_breakpoint target_ops
5254 method. */
5255
5256 static int
5257 linux_stopped_by_sw_breakpoint (void)
5258 {
5259 struct lwp_info *lwp = get_thread_lwp (current_thread);
5260
5261 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5262 }
5263
5264 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5265 method. */
5266
5267 static int
5268 linux_supports_stopped_by_sw_breakpoint (void)
5269 {
5270 return USE_SIGTRAP_SIGINFO;
5271 }
5272
5273 /* Implement the to_stopped_by_hw_breakpoint target_ops
5274 method. */
5275
5276 static int
5277 linux_stopped_by_hw_breakpoint (void)
5278 {
5279 struct lwp_info *lwp = get_thread_lwp (current_thread);
5280
5281 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5282 }
5283
5284 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5285 method. */
5286
5287 static int
5288 linux_supports_stopped_by_hw_breakpoint (void)
5289 {
5290 return USE_SIGTRAP_SIGINFO;
5291 }
5292
5293 /* Implement the supports_conditional_breakpoints target_ops
5294 method. */
5295
5296 static int
5297 linux_supports_conditional_breakpoints (void)
5298 {
5299 /* GDBserver needs to step over the breakpoint if the condition is
5300 false. GDBserver software single step is too simple, so disable
5301 conditional breakpoints if the target doesn't have hardware single
5302 step. */
5303 return can_hardware_single_step ();
5304 }
5305
5306 static int
5307 linux_stopped_by_watchpoint (void)
5308 {
5309 struct lwp_info *lwp = get_thread_lwp (current_thread);
5310
5311 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5312 }
5313
5314 static CORE_ADDR
5315 linux_stopped_data_address (void)
5316 {
5317 struct lwp_info *lwp = get_thread_lwp (current_thread);
5318
5319 return lwp->stopped_data_address;
5320 }
5321
5322 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5323 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5324 && defined(PT_TEXT_END_ADDR)
5325
5326 /* This is only used for targets that define PT_TEXT_ADDR,
5327 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5328 the target has different ways of acquiring this information, like
5329 loadmaps. */
5330
5331 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5332 to tell gdb about. */
5333
5334 static int
5335 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5336 {
5337 unsigned long text, text_end, data;
5338 int pid = lwpid_of (current_thread);
5339
5340 errno = 0;
5341
5342 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5343 (PTRACE_TYPE_ARG4) 0);
5344 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5345 (PTRACE_TYPE_ARG4) 0);
5346 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5347 (PTRACE_TYPE_ARG4) 0);
5348
5349 if (errno == 0)
5350 {
5351 /* Both text and data offsets produced at compile-time (and so
5352 used by gdb) are relative to the beginning of the program,
5353 with the data segment immediately following the text segment.
5354 However, the actual runtime layout in memory may put the data
5355 somewhere else, so when we send gdb a data base-address, we
5356 use the real data base address and subtract the compile-time
5357 data base-address from it (which is just the length of the
5358 text segment). BSS immediately follows data in both
5359 cases. */
5360 *text_p = text;
5361 *data_p = data - (text_end - text);
5362
5363 return 1;
5364 }
5365 return 0;
5366 }
5367 #endif
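/* Worked example for the offset computation above: if the text
   segment of length 0x4000 runs from 0x10000000 to 0x10004000 and the
   data segment was loaded at 0x20000000, gdb is told
   *data_p = 0x20000000 - 0x4000 = 0x1fffc000; a compile-time data
   address such as 0x4010 then relocates to 0x1fffc000 + 0x4010 =
   0x20000010, its true runtime address.  */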
5368
5369 static int
5370 linux_qxfer_osdata (const char *annex,
5371 unsigned char *readbuf, unsigned const char *writebuf,
5372 CORE_ADDR offset, int len)
5373 {
5374 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5375 }
5376
5377 /* Convert a native/host siginfo object into/from the siginfo in the
5378 layout of the inferior's architecture. */
5379
5380 static void
5381 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5382 {
5383 int done = 0;
5384
5385 if (the_low_target.siginfo_fixup != NULL)
5386 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5387
5388 /* If there was no callback, or the callback didn't do anything,
5389 then just do a straight memcpy. */
5390 if (!done)
5391 {
5392 if (direction == 1)
5393 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5394 else
5395 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5396 }
5397 }
5398
5399 static int
5400 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5401 unsigned const char *writebuf, CORE_ADDR offset, int len)
5402 {
5403 int pid;
5404 siginfo_t siginfo;
5405 char inf_siginfo[sizeof (siginfo_t)];
5406
5407 if (current_thread == NULL)
5408 return -1;
5409
5410 pid = lwpid_of (current_thread);
5411
5412 if (debug_threads)
5413 debug_printf ("%s siginfo for lwp %d.\n",
5414 readbuf != NULL ? "Reading" : "Writing",
5415 pid);
5416
5417 if (offset >= sizeof (siginfo))
5418 return -1;
5419
5420 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5421 return -1;
5422
5423 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5424 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5425 inferior with a 64-bit GDBSERVER should look the same as debugging it
5426 with a 32-bit GDBSERVER, we need to convert it. */
5427 siginfo_fixup (&siginfo, inf_siginfo, 0);
5428
5429 if (offset + len > sizeof (siginfo))
5430 len = sizeof (siginfo) - offset;
5431
5432 if (readbuf != NULL)
5433 memcpy (readbuf, inf_siginfo + offset, len);
5434 else
5435 {
5436 memcpy (inf_siginfo + offset, writebuf, len);
5437
5438 /* Convert back to ptrace layout before flushing it out. */
5439 siginfo_fixup (&siginfo, inf_siginfo, 1);
5440
5441 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5442 return -1;
5443 }
5444
5445 return len;
5446 }
5447
5448 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5449 it lets us notice when children change state; and it serves as the
5450 handler for the sigsuspend in my_waitpid. */
5451
5452 static void
5453 sigchld_handler (int signo)
5454 {
5455 int old_errno = errno;
5456
5457 if (debug_threads)
5458 {
5459 do
5460 {
5461 /* fprintf is not async-signal-safe, so call write
5462 directly. */
5463 if (write (2, "sigchld_handler\n",
5464 sizeof ("sigchld_handler\n") - 1) < 0)
5465 break; /* just ignore */
5466 } while (0);
5467 }
5468
5469 if (target_is_async_p ())
5470 async_file_mark (); /* trigger a linux_wait */
5471
5472 errno = old_errno;
5473 }
5474
5475 static int
5476 linux_supports_non_stop (void)
5477 {
5478 return 1;
5479 }
5480
5481 static int
5482 linux_async (int enable)
5483 {
5484 int previous = target_is_async_p ();
5485
5486 if (debug_threads)
5487 debug_printf ("linux_async (%d), previous=%d\n",
5488 enable, previous);
5489
5490 if (previous != enable)
5491 {
5492 sigset_t mask;
5493 sigemptyset (&mask);
5494 sigaddset (&mask, SIGCHLD);
5495
5496 sigprocmask (SIG_BLOCK, &mask, NULL);
5497
5498 if (enable)
5499 {
5500 if (pipe (linux_event_pipe) == -1)
5501 {
5502 linux_event_pipe[0] = -1;
5503 linux_event_pipe[1] = -1;
5504 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5505
5506 warning ("creating event pipe failed.");
5507 return previous;
5508 }
5509
5510 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5511 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5512
5513 /* Register the event loop handler. */
5514 add_file_handler (linux_event_pipe[0],
5515 handle_target_event, NULL);
5516
5517 /* Always trigger a linux_wait. */
5518 async_file_mark ();
5519 }
5520 else
5521 {
5522 delete_file_handler (linux_event_pipe[0]);
5523
5524 close (linux_event_pipe[0]);
5525 close (linux_event_pipe[1]);
5526 linux_event_pipe[0] = -1;
5527 linux_event_pipe[1] = -1;
5528 }
5529
5530 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5531 }
5532
5533 return previous;
5534 }
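/* Illustrative sketch (not part of gdbserver): the event pipe above
   is the classic self-pipe trick -- the signal handler does the one
   async-signal-safe thing, a write, and the event loop watches the
   read end.  A minimal standalone setup assuming only POSIX (the real
   code installs its handler with sigaction rather than signal).  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int self_pipe[2];

static void
handler (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe; the byte's value is irrelevant.  */
  (void) write (self_pipe[1], "+", 1);
  errno = old_errno;
}

static int
setup_self_pipe (void)
{
  if (pipe (self_pipe) != 0)
    return -1;
  fcntl (self_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (self_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, handler);
  /* A select/poll loop now watches self_pipe[0] for readability and
     drains it before calling waitpid.  */
  return 0;
}
#endif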
5535
5536 static int
5537 linux_start_non_stop (int nonstop)
5538 {
5539 /* Register or unregister from event-loop accordingly. */
5540 linux_async (nonstop);
5541
5542 if (target_is_async_p () != (nonstop != 0))
5543 return -1;
5544
5545 return 0;
5546 }
5547
5548 static int
5549 linux_supports_multi_process (void)
5550 {
5551 return 1;
5552 }
5553
5554 /* Check if fork events are supported. */
5555
5556 static int
5557 linux_supports_fork_events (void)
5558 {
5559 return linux_supports_tracefork ();
5560 }
5561
5562 /* Check if vfork events are supported. */
5563
5564 static int
5565 linux_supports_vfork_events (void)
5566 {
5567 return linux_supports_tracefork ();
5568 }
5569
5570 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5571 options for the specified lwp. */
5572
5573 static int
5574 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5575 void *args)
5576 {
5577 struct thread_info *thread = (struct thread_info *) entry;
5578 struct lwp_info *lwp = get_thread_lwp (thread);
5579
5580 if (!lwp->stopped)
5581 {
5582 /* Stop the lwp so we can modify its ptrace options. */
5583 lwp->must_set_ptrace_flags = 1;
5584 linux_stop_lwp (lwp);
5585 }
5586 else
5587 {
5588 /* Already stopped; go ahead and set the ptrace options. */
5589 struct process_info *proc = find_process_pid (pid_of (thread));
5590 int options = linux_low_ptrace_options (proc->attached);
5591
5592 linux_enable_event_reporting (lwpid_of (thread), options);
5593 lwp->must_set_ptrace_flags = 0;
5594 }
5595
5596 return 0;
5597 }
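/* What linux_enable_event_reporting boils down to, sketched under the
   assumption that OPTIONS carries the PTRACE_O_* flags computed by
   linux_low_ptrace_options:

     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) options);

   Depending on what the new GDB connection supports, OPTIONS may
   include PTRACE_O_TRACECLONE, PTRACE_O_TRACEFORK and
   PTRACE_O_TRACEVFORK.  The lwp must be ptrace-stopped for
   PTRACE_SETOPTIONS to succeed, hence the two branches above.  */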
5598
5599 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5600 ptrace flags for all inferiors. This is in case the new GDB connection
5601 doesn't support the same set of events that the previous one did. */
5602
5603 static void
5604 linux_handle_new_gdb_connection (void)
5605 {
5606 pid_t pid;
5607
5608 /* Request that all the lwps reset their ptrace options. */
5609 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5610 }
5611
5612 static int
5613 linux_supports_disable_randomization (void)
5614 {
5615 #ifdef HAVE_PERSONALITY
5616 return 1;
5617 #else
5618 return 0;
5619 #endif
5620 }
5621
5622 static int
5623 linux_supports_agent (void)
5624 {
5625 return 1;
5626 }
5627
5628 static int
5629 linux_supports_range_stepping (void)
5630 {
5631 if (*the_low_target.supports_range_stepping == NULL)
5632 return 0;
5633
5634 return (*the_low_target.supports_range_stepping) ();
5635 }
5636
5637 /* Enumerate spufs IDs for process PID. */
5638 static int
5639 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5640 {
5641 int pos = 0;
5642 int written = 0;
5643 char path[128];
5644 DIR *dir;
5645 struct dirent *entry;
5646
5647 sprintf (path, "/proc/%ld/fd", pid);
5648 dir = opendir (path);
5649 if (!dir)
5650 return -1;
5651
5652 rewinddir (dir);
5653 while ((entry = readdir (dir)) != NULL)
5654 {
5655 struct stat st;
5656 struct statfs stfs;
5657 int fd;
5658
5659 fd = atoi (entry->d_name);
5660 if (!fd)
5661 continue;
5662
5663 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5664 if (stat (path, &st) != 0)
5665 continue;
5666 if (!S_ISDIR (st.st_mode))
5667 continue;
5668
5669 if (statfs (path, &stfs) != 0)
5670 continue;
5671 if (stfs.f_type != SPUFS_MAGIC)
5672 continue;
5673
5674 if (pos >= offset && pos + 4 <= offset + len)
5675 {
5676 *(unsigned int *)(buf + pos - offset) = fd;
5677 written += 4;
5678 }
5679 pos += 4;
5680 }
5681
5682 closedir (dir);
5683 return written;
5684 }
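/* Worked example of the object built above, with hypothetical fds: if
   SPU contexts are open on fds 7 and 9, the full object is two
   native-endian 32-bit integers { 7, 9 }, so a request with OFFSET 4
   and LEN 4 returns just the second ID.  An ID whose four bytes do not
   fit entirely inside [OFFSET, OFFSET + LEN) is skipped rather than
   truncated, which is why WRITTEN can be smaller than LEN.  */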
5685
5686 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5687 object type, using the /proc file system. */
5688 static int
5689 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5690 unsigned const char *writebuf,
5691 CORE_ADDR offset, int len)
5692 {
5693 long pid = lwpid_of (current_thread);
5694 char buf[128];
5695 int fd = 0;
5696 int ret = 0;
5697
5698 if (!writebuf && !readbuf)
5699 return -1;
5700
5701 if (!*annex)
5702 {
5703 if (!readbuf)
5704 return -1;
5705 else
5706 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5707 }
5708
5709 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5710 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5711 if (fd <= 0)
5712 return -1;
5713
5714 if (offset != 0
5715 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5716 {
5717 close (fd);
5718 return 0;
5719 }
5720
5721 if (writebuf)
5722 ret = write (fd, writebuf, (size_t) len);
5723 else
5724 ret = read (fd, readbuf, (size_t) len);
5725
5726 close (fd);
5727 return ret;
5728 }
5729
5730 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5731 struct target_loadseg
5732 {
5733 /* Core address to which the segment is mapped. */
5734 Elf32_Addr addr;
5735 /* VMA recorded in the program header. */
5736 Elf32_Addr p_vaddr;
5737 /* Size of this segment in memory. */
5738 Elf32_Word p_memsz;
5739 };
5740
5741 # if defined PT_GETDSBT
5742 struct target_loadmap
5743 {
5744 /* Protocol version number, must be zero. */
5745 Elf32_Word version;
5746 /* Pointer to the DSBT table, its size, and the DSBT index. */
5747 unsigned *dsbt_table;
5748 unsigned dsbt_size, dsbt_index;
5749 /* Number of segments in this map. */
5750 Elf32_Word nsegs;
5751 /* The actual memory map. */
5752 struct target_loadseg segs[/*nsegs*/];
5753 };
5754 # define LINUX_LOADMAP PT_GETDSBT
5755 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5756 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5757 # else
5758 struct target_loadmap
5759 {
5760 /* Protocol version number, must be zero. */
5761 Elf32_Half version;
5762 /* Number of segments in this map. */
5763 Elf32_Half nsegs;
5764 /* The actual memory map. */
5765 struct target_loadseg segs[/*nsegs*/];
5766 };
5767 # define LINUX_LOADMAP PTRACE_GETFDPIC
5768 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5769 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5770 # endif
5771
5772 static int
5773 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5774 unsigned char *myaddr, unsigned int len)
5775 {
5776 int pid = lwpid_of (current_thread);
5777 int addr = -1;
5778 struct target_loadmap *data = NULL;
5779 unsigned int actual_length, copy_length;
5780
5781 if (strcmp (annex, "exec") == 0)
5782 addr = (int) LINUX_LOADMAP_EXEC;
5783 else if (strcmp (annex, "interp") == 0)
5784 addr = (int) LINUX_LOADMAP_INTERP;
5785 else
5786 return -1;
5787
5788 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5789 return -1;
5790
5791 if (data == NULL)
5792 return -1;
5793
5794 actual_length = sizeof (struct target_loadmap)
5795 + sizeof (struct target_loadseg) * data->nsegs;
5796
5797 if (offset < 0 || offset > actual_length)
5798 return -1;
5799
5800 copy_length = actual_length - offset < len ? actual_length - offset : len;
5801 memcpy (myaddr, (char *) data + offset, copy_length);
5802 return copy_length;
5803 }
5804 #else
5805 # define linux_read_loadmap NULL
5806 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
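/* Worked example of consuming a loadseg, with illustrative numbers:
   given { addr = 0x40a000, p_vaddr = 0x1000, p_memsz = 0x2000 }, a
   link-time address VMA inside [p_vaddr, p_vaddr + p_memsz) maps to
   the runtime address VMA - p_vaddr + addr, so 0x1800 lands at
   0x40a800.  GDB applies this per-segment translation to relocate
   FDPIC/DSBT executables described by this interface.  */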
5807
5808 static void
5809 linux_process_qsupported (const char *query)
5810 {
5811 if (the_low_target.process_qsupported != NULL)
5812 the_low_target.process_qsupported (query);
5813 }
5814
5815 static int
5816 linux_supports_tracepoints (void)
5817 {
5818 if (*the_low_target.supports_tracepoints == NULL)
5819 return 0;
5820
5821 return (*the_low_target.supports_tracepoints) ();
5822 }
5823
5824 static CORE_ADDR
5825 linux_read_pc (struct regcache *regcache)
5826 {
5827 if (the_low_target.get_pc == NULL)
5828 return 0;
5829
5830 return (*the_low_target.get_pc) (regcache);
5831 }
5832
5833 static void
5834 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5835 {
5836 gdb_assert (the_low_target.set_pc != NULL);
5837
5838 (*the_low_target.set_pc) (regcache, pc);
5839 }
5840
5841 static int
5842 linux_thread_stopped (struct thread_info *thread)
5843 {
5844 return get_thread_lwp (thread)->stopped;
5845 }
5846
5847 /* This exposes stop-all-threads functionality to other modules. */
5848
5849 static void
5850 linux_pause_all (int freeze)
5851 {
5852 stop_all_lwps (freeze, NULL);
5853 }
5854
5855 /* This exposes unstop-all-threads functionality to other gdbserver
5856 modules. */
5857
5858 static void
5859 linux_unpause_all (int unfreeze)
5860 {
5861 unstop_all_lwps (unfreeze, NULL);
5862 }
5863
5864 static int
5865 linux_prepare_to_access_memory (void)
5866 {
5867 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5868 running LWP. */
5869 if (non_stop)
5870 linux_pause_all (1);
5871 return 0;
5872 }
5873
5874 static void
5875 linux_done_accessing_memory (void)
5876 {
5877 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5878 running LWP. */
5879 if (non_stop)
5880 linux_unpause_all (1);
5881 }
5882
5883 static int
5884 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5885 CORE_ADDR collector,
5886 CORE_ADDR lockaddr,
5887 ULONGEST orig_size,
5888 CORE_ADDR *jump_entry,
5889 CORE_ADDR *trampoline,
5890 ULONGEST *trampoline_size,
5891 unsigned char *jjump_pad_insn,
5892 ULONGEST *jjump_pad_insn_size,
5893 CORE_ADDR *adjusted_insn_addr,
5894 CORE_ADDR *adjusted_insn_addr_end,
5895 char *err)
5896 {
5897 return (*the_low_target.install_fast_tracepoint_jump_pad)
5898 (tpoint, tpaddr, collector, lockaddr, orig_size,
5899 jump_entry, trampoline, trampoline_size,
5900 jjump_pad_insn, jjump_pad_insn_size,
5901 adjusted_insn_addr, adjusted_insn_addr_end,
5902 err);
5903 }
5904
5905 static struct emit_ops *
5906 linux_emit_ops (void)
5907 {
5908 if (the_low_target.emit_ops != NULL)
5909 return (*the_low_target.emit_ops) ();
5910 else
5911 return NULL;
5912 }
5913
5914 static int
5915 linux_get_min_fast_tracepoint_insn_len (void)
5916 {
5917 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5918 }
5919
5920 /* Extract the address (AT_PHDR) and count (AT_PHNUM) of the inferior's program headers from its auxv. Return 0 on success. */
5921
5922 static int
5923 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5924 CORE_ADDR *phdr_memaddr, int *num_phdr)
5925 {
5926 char filename[PATH_MAX];
5927 int fd;
5928 const int auxv_size = is_elf64
5929 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5930 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5931
5932 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5933
5934 fd = open (filename, O_RDONLY);
5935 if (fd < 0)
5936 return 1;
5937
5938 *phdr_memaddr = 0;
5939 *num_phdr = 0;
5940 while (read (fd, buf, auxv_size) == auxv_size
5941 && (*phdr_memaddr == 0 || *num_phdr == 0))
5942 {
5943 if (is_elf64)
5944 {
5945 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5946
5947 switch (aux->a_type)
5948 {
5949 case AT_PHDR:
5950 *phdr_memaddr = aux->a_un.a_val;
5951 break;
5952 case AT_PHNUM:
5953 *num_phdr = aux->a_un.a_val;
5954 break;
5955 }
5956 }
5957 else
5958 {
5959 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5960
5961 switch (aux->a_type)
5962 {
5963 case AT_PHDR:
5964 *phdr_memaddr = aux->a_un.a_val;
5965 break;
5966 case AT_PHNUM:
5967 *num_phdr = aux->a_un.a_val;
5968 break;
5969 }
5970 }
5971 }
5972
5973 close (fd);
5974
5975 if (*phdr_memaddr == 0 || *num_phdr == 0)
5976 {
5977 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5978 "phdr_memaddr = %ld, phdr_num = %d",
5979 (long) *phdr_memaddr, *num_phdr);
5980 return 2;
5981 }
5982
5983 return 0;
5984 }
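/* For reference, auxv is a flat array of { a_type, a_val } records
   terminated by AT_NULL.  An illustrative 64-bit dump might contain:

     a_type = 3 (AT_PHDR),  a_val = 0x400040
     a_type = 5 (AT_PHNUM), a_val = 9

   The loop above stops as soon as both values are known, so it
   normally reads only the first few records.  */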
5985
5986 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5987
5988 static CORE_ADDR
5989 get_dynamic (const int pid, const int is_elf64)
5990 {
5991 CORE_ADDR phdr_memaddr, relocation;
5992 int num_phdr, i;
5993 unsigned char *phdr_buf;
5994 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5995
5996 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5997 return 0;
5998
5999 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6000 phdr_buf = alloca (num_phdr * phdr_size);
6001
6002 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6003 return 0;
6004
6005 /* Compute relocation: it is expected to be 0 for "regular" executables,
6006 non-zero for PIE ones. */
6007 relocation = -1;
6008 for (i = 0; relocation == -1 && i < num_phdr; i++)
6009 if (is_elf64)
6010 {
6011 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6012
6013 if (p->p_type == PT_PHDR)
6014 relocation = phdr_memaddr - p->p_vaddr;
6015 }
6016 else
6017 {
6018 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6019
6020 if (p->p_type == PT_PHDR)
6021 relocation = phdr_memaddr - p->p_vaddr;
6022 }
6023
6024 if (relocation == -1)
6025 {
6026 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6027 all real-world executables, including PIE ones, always carry a
6028 PT_PHDR entry. PT_PHDR is missing from some shared libraries and
6029 from fpc (Free Pascal 2.4) binaries, but those neither need nor
6030 provide DT_DEBUG anyway (fpc binaries are statically linked).
6031
6032 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6033
6034 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6035
6036 return 0;
6037 }
6038
6039 for (i = 0; i < num_phdr; i++)
6040 {
6041 if (is_elf64)
6042 {
6043 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6044
6045 if (p->p_type == PT_DYNAMIC)
6046 return p->p_vaddr + relocation;
6047 }
6048 else
6049 {
6050 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6051
6052 if (p->p_type == PT_DYNAMIC)
6053 return p->p_vaddr + relocation;
6054 }
6055 }
6056
6057 return 0;
6058 }
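/* Worked example (addresses illustrative): a PIE whose PT_PHDR has
   p_vaddr = 0x40 might get its program headers mapped at
   phdr_memaddr = 0x555555554040 per auxv, giving RELOCATION =
   0x555555554040 - 0x40 = 0x555555554000; a PT_DYNAMIC with
   p_vaddr = 0x2d00 then yields &_DYNAMIC = 0x555555556d00.  For a
   non-PIE executable both p_vaddr values are already absolute and
   RELOCATION is 0.  */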
6059
6060 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6061 can be 0 if the inferior does not yet have the library list initialized.
6062 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6063 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6064
6065 static CORE_ADDR
6066 get_r_debug (const int pid, const int is_elf64)
6067 {
6068 CORE_ADDR dynamic_memaddr;
6069 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6070 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6071 CORE_ADDR map = -1;
6072
6073 dynamic_memaddr = get_dynamic (pid, is_elf64);
6074 if (dynamic_memaddr == 0)
6075 return map;
6076
6077 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6078 {
6079 if (is_elf64)
6080 {
6081 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6082 #ifdef DT_MIPS_RLD_MAP
6083 union
6084 {
6085 Elf64_Xword map;
6086 unsigned char buf[sizeof (Elf64_Xword)];
6087 }
6088 rld_map;
6089
6090 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6091 {
6092 if (linux_read_memory (dyn->d_un.d_val,
6093 rld_map.buf, sizeof (rld_map.buf)) == 0)
6094 return rld_map.map;
6095 else
6096 break;
6097 }
6098 #endif /* DT_MIPS_RLD_MAP */
6099
6100 if (dyn->d_tag == DT_DEBUG && map == -1)
6101 map = dyn->d_un.d_val;
6102
6103 if (dyn->d_tag == DT_NULL)
6104 break;
6105 }
6106 else
6107 {
6108 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6109 #ifdef DT_MIPS_RLD_MAP
6110 union
6111 {
6112 Elf32_Word map;
6113 unsigned char buf[sizeof (Elf32_Word)];
6114 }
6115 rld_map;
6116
6117 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6118 {
6119 if (linux_read_memory (dyn->d_un.d_val,
6120 rld_map.buf, sizeof (rld_map.buf)) == 0)
6121 return rld_map.map;
6122 else
6123 break;
6124 }
6125 #endif /* DT_MIPS_RLD_MAP */
6126
6127 if (dyn->d_tag == DT_DEBUG && map == -1)
6128 map = dyn->d_un.d_val;
6129
6130 if (dyn->d_tag == DT_NULL)
6131 break;
6132 }
6133
6134 dynamic_memaddr += dyn_size;
6135 }
6136
6137 return map;
6138 }
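/* The value returned above is what the dynamic linker stored in the
   DT_DEBUG slot: the address of its struct r_debug.  A sketch of the
   two fields consumed by linux_qxfer_libraries_svr4 below (glibc's
   <link.h> has the authoritative definition):

     struct r_debug
     {
       int r_version;
       struct link_map *r_map;
     };

   r_version is the interface version (1 is expected below) and r_map
   heads the chain of loaded objects.  */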
6139
6140 /* Read one pointer from MEMADDR in the inferior. */
6141
6142 static int
6143 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6144 {
6145 int ret;
6146
6147 /* Go through a union so this works on either big or little endian
6148 hosts, when the inferior's pointer size is smaller than the size
6149 of CORE_ADDR. The inferior's endianness is assumed to be the
6150 same as the superior's. */
6151 union
6152 {
6153 CORE_ADDR core_addr;
6154 unsigned int ui;
6155 unsigned char uc;
6156 } addr;
6157
6158 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6159 if (ret == 0)
6160 {
6161 if (ptr_size == sizeof (CORE_ADDR))
6162 *ptr = addr.core_addr;
6163 else if (ptr_size == sizeof (unsigned int))
6164 *ptr = addr.ui;
6165 else
6166 gdb_assert_not_reached ("unhandled pointer size");
6167 }
6168 return ret;
6169 }
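/* Why the union matters: on a big-endian host with an 8-byte
   CORE_ADDR, reading a 4-byte inferior pointer straight into a
   CORE_ADDR would leave the value in the high-order bytes.  Reading
   into the union instead fills its first four bytes, which are exactly
   the bytes of ADDR.UI, so the 32-bit interpretation is correct on
   either endianness, given that inferior and server share endianness
   as noted above.  */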
6170
6171 struct link_map_offsets
6172 {
6173 /* Offset and size of r_debug.r_version. */
6174 int r_version_offset;
6175
6176 /* Offset and size of r_debug.r_map. */
6177 int r_map_offset;
6178
6179 /* Offset to l_addr field in struct link_map. */
6180 int l_addr_offset;
6181
6182 /* Offset to l_name field in struct link_map. */
6183 int l_name_offset;
6184
6185 /* Offset to l_ld field in struct link_map. */
6186 int l_ld_offset;
6187
6188 /* Offset to l_next field in struct link_map. */
6189 int l_next_offset;
6190
6191 /* Offset to l_prev field in struct link_map. */
6192 int l_prev_offset;
6193 };
6194
6195 /* Construct qXfer:libraries-svr4:read reply. */
6196
6197 static int
6198 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6199 unsigned const char *writebuf,
6200 CORE_ADDR offset, int len)
6201 {
6202 char *document;
6203 unsigned document_len;
6204 struct process_info_private *const priv = current_process ()->priv;
6205 char filename[PATH_MAX];
6206 int pid, is_elf64;
6207
6208 static const struct link_map_offsets lmo_32bit_offsets =
6209 {
6210 0, /* r_version offset. */
6211 4, /* r_debug.r_map offset. */
6212 0, /* l_addr offset in link_map. */
6213 4, /* l_name offset in link_map. */
6214 8, /* l_ld offset in link_map. */
6215 12, /* l_next offset in link_map. */
6216 16 /* l_prev offset in link_map. */
6217 };
6218
6219 static const struct link_map_offsets lmo_64bit_offsets =
6220 {
6221 0, /* r_version offset. */
6222 8, /* r_debug.r_map offset. */
6223 0, /* l_addr offset in link_map. */
6224 8, /* l_name offset in link_map. */
6225 16, /* l_ld offset in link_map. */
6226 24, /* l_next offset in link_map. */
6227 32 /* l_prev offset in link_map. */
6228 };
6229 const struct link_map_offsets *lmo;
6230 unsigned int machine;
6231 int ptr_size;
6232 CORE_ADDR lm_addr = 0, lm_prev = 0;
6233 int allocated = 1024;
6234 char *p;
6235 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6236 int header_done = 0;
6237
6238 if (writebuf != NULL)
6239 return -2;
6240 if (readbuf == NULL)
6241 return -1;
6242
6243 pid = lwpid_of (current_thread);
6244 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6245 is_elf64 = elf_64_file_p (filename, &machine);
6246 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6247 ptr_size = is_elf64 ? 8 : 4;
6248
6249 while (annex[0] != '\0')
6250 {
6251 const char *sep;
6252 CORE_ADDR *addrp;
6253 int len;
6254
6255 sep = strchr (annex, '=');
6256 if (sep == NULL)
6257 break;
6258
6259 len = sep - annex;
6260 if (len == 5 && startswith (annex, "start"))
6261 addrp = &lm_addr;
6262 else if (len == 4 && startswith (annex, "prev"))
6263 addrp = &lm_prev;
6264 else
6265 {
6266 annex = strchr (sep, ';');
6267 if (annex == NULL)
6268 break;
6269 annex++;
6270 continue;
6271 }
6272
6273 annex = decode_address_to_semicolon (addrp, sep + 1);
6274 }
6275
6276 if (lm_addr == 0)
6277 {
6278 int r_version = 0;
6279
6280 if (priv->r_debug == 0)
6281 priv->r_debug = get_r_debug (pid, is_elf64);
6282
6283 /* We failed to find DT_DEBUG. That will not change for this
6284 inferior, so do not retry. Report it to GDB as E01; see
6285 GDB's solib-svr4.c for the rationale. */
6286 if (priv->r_debug == (CORE_ADDR) -1)
6287 return -1;
6288
6289 if (priv->r_debug != 0)
6290 {
6291 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6292 (unsigned char *) &r_version,
6293 sizeof (r_version)) != 0
6294 || r_version != 1)
6295 {
6296 warning ("unexpected r_debug version %d", r_version);
6297 }
6298 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6299 &lm_addr, ptr_size) != 0)
6300 {
6301 warning ("unable to read r_map from 0x%lx",
6302 (long) (priv->r_debug + lmo->r_map_offset));
6303 }
6304 }
6305 }
6306
6307 document = xmalloc (allocated);
6308 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6309 p = document + strlen (document);
6310
6311 while (lm_addr
6312 && read_one_ptr (lm_addr + lmo->l_name_offset,
6313 &l_name, ptr_size) == 0
6314 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6315 &l_addr, ptr_size) == 0
6316 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6317 &l_ld, ptr_size) == 0
6318 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6319 &l_prev, ptr_size) == 0
6320 && read_one_ptr (lm_addr + lmo->l_next_offset,
6321 &l_next, ptr_size) == 0)
6322 {
6323 unsigned char libname[PATH_MAX];
6324
6325 if (lm_prev != l_prev)
6326 {
6327 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6328 (long) lm_prev, (long) l_prev);
6329 break;
6330 }
6331
6332 /* Ignore the first entry even if it has a valid name, as it
6333 corresponds to the main executable. The first entry should not be
6334 skipped when the dynamic loader was loaded late by a static
6335 executable (see the solib-svr4.c parameter ignore_first), but in
6336 that case the main executable lacks PT_DYNAMIC and this function
6337 has already returned above when get_r_debug failed. */
6338 if (lm_prev == 0)
6339 {
6340 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6341 p = p + strlen (p);
6342 }
6343 else
6344 {
6345 /* Not checking for error because reading may stop before
6346 we've got PATH_MAX worth of characters. */
6347 libname[0] = '\0';
6348 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6349 libname[sizeof (libname) - 1] = '\0';
6350 if (libname[0] != '\0')
6351 {
6352 /* 6x the size for xml_escape_text below. */
6353 size_t len = 6 * strlen ((char *) libname);
6354 char *name;
6355
6356 if (!header_done)
6357 {
6358 /* Terminate `<library-list-svr4'. */
6359 *p++ = '>';
6360 header_done = 1;
6361 }
6362
6363 while (allocated < p - document + len + 200)
6364 {
6365 /* Expand to guarantee sufficient storage. */
6366 uintptr_t document_len = p - document;
6367
6368 document = xrealloc (document, 2 * allocated);
6369 allocated *= 2;
6370 p = document + document_len;
6371 }
6372
6373 name = xml_escape_text ((char *) libname);
6374 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6375 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6376 name, (unsigned long) lm_addr,
6377 (unsigned long) l_addr, (unsigned long) l_ld);
6378 free (name);
6379 }
6380 }
6381
6382 lm_prev = lm_addr;
6383 lm_addr = l_next;
6384 }
6385
6386 if (!header_done)
6387 {
6388 /* Empty list; terminate `<library-list-svr4'. */
6389 strcpy (p, "/>");
6390 }
6391 else
6392 strcpy (p, "</library-list-svr4>");
6393
6394 document_len = strlen (document);
6395 if (offset < document_len)
6396 document_len -= offset;
6397 else
6398 document_len = 0;
6399 if (len > document_len)
6400 len = document_len;
6401
6402 memcpy (readbuf, document + offset, len);
6403 xfree (document);
6404
6405 return len;
6406 }
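/* A sample document as built above, for an inferior with one library
   loaded (addresses illustrative; line breaks added here for
   readability, the real document is one unbroken string):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
              l_addr="0x7ffff7a00000" l_ld="0x7ffff7dbdb80"/>
     </library-list-svr4>

   OFFSET and LEN then slice this string across as many
   qXfer:libraries-svr4:read packets as GDB needs.  */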
6407
6408 #ifdef HAVE_LINUX_BTRACE
6409
6410 /* See to_enable_btrace target method. */
6411
6412 static struct btrace_target_info *
6413 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6414 {
6415 struct btrace_target_info *tinfo;
6416
6417 tinfo = linux_enable_btrace (ptid, conf);
6418
6419 if (tinfo != NULL && tinfo->ptr_bits == 0)
6420 {
6421 struct thread_info *thread = find_thread_ptid (ptid);
6422 struct regcache *regcache = get_thread_regcache (thread, 0);
6423
6424 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6425 }
6426
6427 return tinfo;
6428 }
6429
6430 /* See to_disable_btrace target method. */
6431
6432 static int
6433 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6434 {
6435 enum btrace_error err;
6436
6437 err = linux_disable_btrace (tinfo);
6438 return (err == BTRACE_ERR_NONE ? 0 : -1);
6439 }
6440
6441 /* See to_read_btrace target method. */
6442
6443 static int
6444 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6445 int type)
6446 {
6447 struct btrace_data btrace;
6448 struct btrace_block *block;
6449 enum btrace_error err;
6450 int i;
6451
6452 btrace_data_init (&btrace);
6453
6454 err = linux_read_btrace (&btrace, tinfo, type);
6455 if (err != BTRACE_ERR_NONE)
6456 {
6457 if (err == BTRACE_ERR_OVERFLOW)
6458 buffer_grow_str0 (buffer, "E.Overflow.");
6459 else
6460 buffer_grow_str0 (buffer, "E.Generic Error.");
6461
6462 btrace_data_fini (&btrace);
6463 return -1;
6464 }
6465
6466 switch (btrace.format)
6467 {
6468 case BTRACE_FORMAT_NONE:
6469 buffer_grow_str0 (buffer, "E.No Trace.");
6470 break;
6471
6472 case BTRACE_FORMAT_BTS:
6473 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6474 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6475
6476 for (i = 0;
6477 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6478 i++)
6479 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6480 paddress (block->begin), paddress (block->end));
6481
6482 buffer_grow_str0 (buffer, "</btrace>\n");
6483 break;
6484
6485 default:
6486 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6487
6488 btrace_data_fini (&btrace);
6489 return -1;
6490 }
6491
6492 btrace_data_fini (&btrace);
6493 return 0;
6494 }
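/* An illustrative BTS reply with two recorded blocks:

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400500" end="0x400520"/>
     <block begin="0x400430" end="0x400447"/>
     </btrace>

   Each block covers one run of sequentially executed instructions;
   the blocks vector is expected to be ordered most-recent-first.  */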
6495
6496 /* See to_btrace_conf target method. */
6497
6498 static int
6499 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6500 struct buffer *buffer)
6501 {
6502 const struct btrace_config *conf;
6503
6504 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6505 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6506
6507 conf = linux_btrace_conf (tinfo);
6508 if (conf != NULL)
6509 {
6510 switch (conf->format)
6511 {
6512 case BTRACE_FORMAT_NONE:
6513 break;
6514
6515 case BTRACE_FORMAT_BTS:
6516 buffer_xml_printf (buffer, "<bts");
6517 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6518 buffer_xml_printf (buffer, " />\n");
6519 break;
6520 }
6521 }
6522
6523 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6524 return 0;
6525 }
6526 #endif /* HAVE_LINUX_BTRACE */
6527
6528 /* See nat/linux-nat.h. */
6529
6530 ptid_t
6531 current_lwp_ptid (void)
6532 {
6533 return ptid_of (current_thread);
6534 }
6535
6536 static struct target_ops linux_target_ops = {
6537 linux_create_inferior,
6538 linux_attach,
6539 linux_kill,
6540 linux_detach,
6541 linux_mourn,
6542 linux_join,
6543 linux_thread_alive,
6544 linux_resume,
6545 linux_wait,
6546 linux_fetch_registers,
6547 linux_store_registers,
6548 linux_prepare_to_access_memory,
6549 linux_done_accessing_memory,
6550 linux_read_memory,
6551 linux_write_memory,
6552 linux_look_up_symbols,
6553 linux_request_interrupt,
6554 linux_read_auxv,
6555 linux_supports_z_point_type,
6556 linux_insert_point,
6557 linux_remove_point,
6558 linux_stopped_by_sw_breakpoint,
6559 linux_supports_stopped_by_sw_breakpoint,
6560 linux_stopped_by_hw_breakpoint,
6561 linux_supports_stopped_by_hw_breakpoint,
6562 linux_supports_conditional_breakpoints,
6563 linux_stopped_by_watchpoint,
6564 linux_stopped_data_address,
6565 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6566 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6567 && defined(PT_TEXT_END_ADDR)
6568 linux_read_offsets,
6569 #else
6570 NULL,
6571 #endif
6572 #ifdef USE_THREAD_DB
6573 thread_db_get_tls_address,
6574 #else
6575 NULL,
6576 #endif
6577 linux_qxfer_spu,
6578 hostio_last_error_from_errno,
6579 linux_qxfer_osdata,
6580 linux_xfer_siginfo,
6581 linux_supports_non_stop,
6582 linux_async,
6583 linux_start_non_stop,
6584 linux_supports_multi_process,
6585 linux_supports_fork_events,
6586 linux_supports_vfork_events,
6587 linux_handle_new_gdb_connection,
6588 #ifdef USE_THREAD_DB
6589 thread_db_handle_monitor_command,
6590 #else
6591 NULL,
6592 #endif
6593 linux_common_core_of_thread,
6594 linux_read_loadmap,
6595 linux_process_qsupported,
6596 linux_supports_tracepoints,
6597 linux_read_pc,
6598 linux_write_pc,
6599 linux_thread_stopped,
6600 NULL,
6601 linux_pause_all,
6602 linux_unpause_all,
6603 linux_stabilize_threads,
6604 linux_install_fast_tracepoint_jump_pad,
6605 linux_emit_ops,
6606 linux_supports_disable_randomization,
6607 linux_get_min_fast_tracepoint_insn_len,
6608 linux_qxfer_libraries_svr4,
6609 linux_supports_agent,
6610 #ifdef HAVE_LINUX_BTRACE
6611 linux_supports_btrace,
6612 linux_low_enable_btrace,
6613 linux_low_disable_btrace,
6614 linux_low_read_btrace,
6615 linux_low_btrace_conf,
6616 #else
6617 NULL,
6618 NULL,
6619 NULL,
6620 NULL,
6621 NULL,
6622 #endif
6623 linux_supports_range_stepping,
6624 linux_proc_pid_to_exec_file,
6625 };
6626
6627 static void
6628 linux_init_signals (void)
6629 {
6630 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6631 to find what the cancel signal actually is. */
6632 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6633 signal (__SIGRTMIN+1, SIG_IGN);
6634 #endif
6635 }
6636
6637 #ifdef HAVE_LINUX_REGSETS
6638 void
6639 initialize_regsets_info (struct regsets_info *info)
6640 {
6641 for (info->num_regsets = 0;
6642 info->regsets[info->num_regsets].size >= 0;
6643 info->num_regsets++)
6644 ;
6645 }
6646 #endif
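/* Each arch's regsets table is expected to end with a sentinel whose
   size field is negative; a hedged sketch of what a target file
   supplies (names hypothetical, field order per struct regset_info in
   linux-low.h):

     static struct regset_info my_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, my_fill_gregset, my_store_gregset },
       { 0, 0, 0, -1, -1, NULL, NULL }
     };

   initialize_regsets_info above simply counts entries until it finds
   the size == -1 sentinel.  */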
6647
6648 void
6649 initialize_low (void)
6650 {
6651 struct sigaction sigchld_action;
6652 memset (&sigchld_action, 0, sizeof (sigchld_action));
6653 set_target_ops (&linux_target_ops);
6654 set_breakpoint_data (the_low_target.breakpoint,
6655 the_low_target.breakpoint_len);
6656 linux_init_signals ();
6657 linux_ptrace_init_warnings ();
6658
6659 sigchld_action.sa_handler = sigchld_handler;
6660 sigemptyset (&sigchld_action.sa_mask);
6661 sigchld_action.sa_flags = SA_RESTART;
6662 sigaction (SIGCHLD, &sigchld_action, NULL);
6663
6664 initialize_low_arch ();
6665
6666 linux_check_ptrace_features ();
6667 }