/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
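
/* For illustration: this fallback matches the encoding the standard
   wait macros decode (assuming the usual glibc definitions), so a
   status built here round-trips cleanly:

     int status = W_STOPCODE (SIGTRAP);

   makes WIFSTOPPED (status) nonzero and WSTOPSIG (status) == SIGTRAP.
   dequeue_one_deferred_signal below relies on exactly this when it
   re-materializes a wait status for a queued signal.  */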

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;   /* Entry type */
  union
    {
      uint32_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;   /* Entry type */
  union
    {
      uint64_t a_val;   /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
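
/* A sketch of how the two are meant to pair up (the claiming side is
   handle_extended_wait below; NEW_PID and STATUS are placeholders):

     int status;

     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... the initial stop hasn't been seen yet; waitpid for it ...

   i.e. a stop recorded early with add_to_pid_list for a PID we don't
   know yet is consumed at most once, by whoever later recognizes that
   PID as its new child.  */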

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}
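
/* A signal intercepted from an LWP but not yet passed on, together
   with its siginfo, kept on a singly linked list through PREV.  */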
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  In all cases, store
   the e_machine field (EM_NONE for non-ELF files) in *MACHINE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
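
/* Example (hypothetical caller, with a made-up PID variable): picking
   register sets based on the inferior's executable:

     unsigned int machine;

     if (linux_pid_exe_is_elf_64_file (pid, &machine) == 1)
       ... use a 64-bit layout for MACHINE (e.g. EM_X86_64) ...
     else
       ... fall back to a 32-bit layout ...

   Note the tri-state result documented above elf_64_file_p: -1 means
   /proc/PID/exe could not be read or wasn't ELF.  */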

/* Delete LWP: remove the thread it is bound to from the thread list,
   and free the lwp_info itself along with its arch-specific data.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 1 so as
   not to report the trap to higher layers).  Fork and vfork events
   are reported to the caller (return 0).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;
          clone_all_breakpoints (&child_proc->breakpoints,
                                 &child_proc->raw_breakpoints,
                                 parent_proc->breakpoints);

          tdesc = xmalloc (sizeof (struct target_desc));
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by software breakpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              /* Back up the PC if necessary.  */
              if (pc != sw_breakpoint_pc)
                {
                  struct regcache *regcache
                    = get_thread_regcache (current_thread, 1);
                  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
                }

              lwp->stop_pc = sw_breakpoint_pc;
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_HWBKPT)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by hardware "
                                "breakpoint/watchpoint\n",
                                target_pid_to_str (ptid_of (thr)));
                }

              lwp->stop_pc = pc;
              lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
              current_thread = saved_thread;
              return 1;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              if (debug_threads)
                {
                  struct thread_info *thr = get_lwp_thread (lwp);

                  debug_printf ("CSBB: %s stopped by trace\n",
                                target_pid_to_str (ptid_of (thr)));
                }
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
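
/* Allocate and zero-initialize an lwp_info for PTID, let the low
   target attach its per-thread data, and bind it to a new thread on
   the thread list.  */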
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};
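
/* Callback for find_inferior.  Counts threads whose pid matches
   COUNTER->pid; returns nonzero (stopping the walk) as soon as a
   second one is seen.  */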
static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}
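
/* Return nonzero if the process with id PID has no more than one
   thread left on the thread list.  */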
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}
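
/* Kill the whole process PID: stop all its LWPs, kill each of them
   (leader last; see the comment in kill_one_lwp_callback), and mourn
   the process.  */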
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
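
/* Callback for find_inferior.  Detaches from an LWP of process PID:
   discard any pending SIGSTOP, flush its registers, pass on any
   pending signal, and PTRACE_DETACH it.  */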
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}
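
/* Detach from process PID, first detaching from each of its LWPs.
   Returns 0 on success, -1 if PID is not a known process.  */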
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
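
/* Forget everything about process PROCESS: delete its LWPs from the
   thread list, free its private data, and remove it from the process
   list.  */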
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
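
/* Wait for process PID until it has exited or been killed, reaping
   any remaining wait statuses; give up once waitpid reports there is
   nothing left to wait for (ECHILD).  */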
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
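
/* Callback for find_inferior.  Returns 1 if ENTRY's LWP id matches
   the one in the ptid pointed at by DATA (falling back to the ptid's
   pid when its lwp field is unset).  */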
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}
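
/* Return the lwp_info whose LWP id matches PTID, or NULL if there is
   none.  */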
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
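
/* For illustration, a hypothetical caller that wanted to bump the
   suspend count of every LWP of process 1234 could look like this
   (SUSPEND_ONE is a made-up callback, not part of this file):

     static int
     suspend_one (struct lwp_info *lwp, void *data)
     {
       lwp->suspended++;
       return 0;     <- zero means keep iterating
     }

     ...
     iterate_over_lwps (pid_to_ptid (1234), suspend_one, NULL);

   A nonzero return from the callback stops the walk and makes
   iterate_over_lwps return that LWP.  */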

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving place to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        debug_printf ("Checking whether LWP %ld needs to move out of the "
                      "jump pad.\n",
                      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                debug_printf ("Checking whether LWP %ld needs to move out of "
                              "the jump pad...it does\n",
                              lwpid_of (current_thread));
              current_thread = saved_thread;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
                             (PTRACE_TYPE_ARG3) 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
                          (PTRACE_TYPE_ARG3) 0, &info);
                }

              regcache = get_thread_regcache (current_thread, 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
                              "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
                  "jump pad...no\n",
                  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
                  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        debug_printf ("   Already queued %d\n",
                      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                debug_printf ("Not requeuing already queued non-RT signal %d"
                              " for LWP %ld\n",
                              sig->signal,
                              lwpid_of (thread));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
          &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
1928
1929 /* Dequeue one signal from the "signals to report later when out of
1930 the jump pad" list. */
1931
1932 static int
1933 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1934 {
1935 struct thread_info *thread = get_lwp_thread (lwp);
1936
1937 if (lwp->pending_signals_to_report != NULL)
1938 {
1939 struct pending_signals **p_sig;
1940
1941 p_sig = &lwp->pending_signals_to_report;
1942 while ((*p_sig)->prev != NULL)
1943 p_sig = &(*p_sig)->prev;
1944
1945 *wstat = W_STOPCODE ((*p_sig)->signal);
1946 if ((*p_sig)->info.si_signo != 0)
1947 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1948 &(*p_sig)->info);
1949 free (*p_sig);
1950 *p_sig = NULL;
1951
1952 if (debug_threads)
1953 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1954 WSTOPSIG (*wstat), lwpid_of (thread));
1955
1956 if (debug_threads)
1957 {
1958 struct pending_signals *sig;
1959
1960 for (sig = lwp->pending_signals_to_report;
1961 sig != NULL;
1962 sig = sig->prev)
1963 debug_printf (" Still queued %d\n",
1964 sig->signal);
1965
1966 debug_printf (" (no more queued signals)\n");
1967 }
1968
1969 return 1;
1970 }
1971
1972 return 0;
1973 }
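
/* Editorial note on the queue discipline above (a sketch of what the
   code already does, not new behavior): pending_signals_to_report is
   a singly linked list threaded through the `prev' pointers, with
   the most recently deferred signal at the head.  The enqueue side
   pushes at the head; the dequeue side walks to the tail:

     head (newest) -> ... -> tail (oldest)
     enqueue: p_sig->prev = head; head = p_sig;
     dequeue: walk until (*p_sig)->prev == NULL, report *p_sig

   so deferred signals are re-reported in arrival (FIFO) order, with
   W_STOPCODE re-synthesizing a "stopped by signal" wait status as if
   waitpid had just reported the stop.  */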
1974
1975 /* Fetch the possibly triggered data watchpoint info and store it in
1976 CHILD.
1977
1978 On some archs, like x86, that use debug registers to set
1979 watchpoints, the way to know which watched address trapped is
1980 to read back the register that is used to select which address
1981 to watch. The problem is that between setting the watchpoint
1982 and reading back which data address trapped, the user may
1983 change the set of watchpoints, and, as a consequence, GDB
1984 changes the debug registers in the inferior. To avoid reading
1985 back a stale stopped-data-address when that happens, we cache
1986 in CHILD the fact that a watchpoint trapped, and the
1987 corresponding data address, as soon as we see CHILD stop with a
1988 SIGTRAP. If GDB changes the debug registers meanwhile, we rely on the cached data. */
1989
1990 static int
1991 check_stopped_by_watchpoint (struct lwp_info *child)
1992 {
1993 if (the_low_target.stopped_by_watchpoint != NULL)
1994 {
1995 struct thread_info *saved_thread;
1996
1997 saved_thread = current_thread;
1998 current_thread = get_lwp_thread (child);
1999
2000 if (the_low_target.stopped_by_watchpoint ())
2001 {
2002 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2003
2004 if (the_low_target.stopped_data_address != NULL)
2005 child->stopped_data_address
2006 = the_low_target.stopped_data_address ();
2007 else
2008 child->stopped_data_address = 0;
2009 }
2010
2011 current_thread = saved_thread;
2012 }
2013
2014 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2015 }
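
/* Editorial illustration (not part of the original sources): on
   x86, the stopped_data_address hook used above boils down to
   reading the debug registers through the ptrace user area: DR7
   selects the watched addresses, DR6 reports what triggered.  A
   minimal sketch, assuming an x86 GNU/Linux tracee TID that is
   ptrace-stopped; the bits set in the returned DR6 word say which
   debug register fired:

     #include <stddef.h>
     #include <sys/ptrace.h>
     #include <sys/user.h>

     static unsigned long
     example_read_dr6 (int tid)
     {
       size_t off = offsetof (struct user, u_debugreg[6]);
       return ptrace (PTRACE_PEEKUSER, tid, (void *) off, (void *) 0);
     }

   The caching discussed above matters precisely because GDB may
   rewrite the debug registers between the trap and this read.  */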
2016
2017 /* Return the ptrace options that we want to try to enable. */
2018
2019 static int
2020 linux_low_ptrace_options (int attached)
2021 {
2022 int options = 0;
2023
2024 if (!attached)
2025 options |= PTRACE_O_EXITKILL;
2026
2027 if (report_fork_events)
2028 options |= PTRACE_O_TRACEFORK;
2029
2030 if (report_vfork_events)
2031 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2032
2033 return options;
2034 }
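
/* Editorial illustration: the options computed above reach the
   kernel through PTRACE_SETOPTIONS (this is what
   linux_enable_event_reporting, called below, ultimately does for
   us).  A minimal sketch, assuming TID is already in ptrace-stop
   (otherwise the call fails with ESRCH):

     #include <sys/ptrace.h>

     static void
     example_set_options (int tid, int options)
     {
       ptrace (PTRACE_SETOPTIONS, tid, (void *) 0,
               (void *) (long) options);
     }

   PTRACE_O_EXITKILL makes the kernel SIGKILL the tracee if gdbserver
   itself dies; that is why it is set only for processes we spawned
   (!attached), not for ones the user attached to.  */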
2035
2036 /* Do low-level handling of the event, and check if we should go on
2037 and pass it on to caller code. Return the affected lwp if so, or
2038 NULL otherwise. */
2039
2040 static struct lwp_info *
2041 linux_low_filter_event (int lwpid, int wstat)
2042 {
2043 struct lwp_info *child;
2044 struct thread_info *thread;
2045 int have_stop_pc = 0;
2046
2047 child = find_lwp_pid (pid_to_ptid (lwpid));
2048
2049 /* If we didn't find a process, one of two things presumably happened:
2050 - A process we started and then detached from has exited. Ignore it.
2051 - A process we are controlling has forked and the new child's stop
2052 was reported to us by the kernel. Save its PID. */
2053 if (child == NULL && WIFSTOPPED (wstat))
2054 {
2055 add_to_pid_list (&stopped_pids, lwpid, wstat);
2056 return NULL;
2057 }
2058 else if (child == NULL)
2059 return NULL;
2060
2061 thread = get_lwp_thread (child);
2062
2063 child->stopped = 1;
2064
2065 child->last_status = wstat;
2066
2067 /* Check if the thread has exited. */
2068 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2069 {
2070 if (debug_threads)
2071 debug_printf ("LLFE: %d exited.\n", lwpid);
2072 if (num_lwps (pid_of (thread)) > 1)
2073 {
2074
2075 /* If there is at least one more LWP, then the exit signal was
2076 not the end of the debugged application and should be
2077 ignored. */
2078 delete_lwp (child);
2079 return NULL;
2080 }
2081 else
2082 {
2083 /* This was the last lwp in the process. Events are
2084 serialized to GDB core, so we can't report this one
2085 right now; but GDB core and the other target layers
2086 will want to be notified about the exit code/signal, so
2087 leave the status pending for the next time we're able
2088 to report it. */
2089 mark_lwp_dead (child, wstat);
2090 return child;
2091 }
2092 }
2093
2094 gdb_assert (WIFSTOPPED (wstat));
2095
2096 if (WIFSTOPPED (wstat))
2097 {
2098 struct process_info *proc;
2099
2100 /* Architecture-specific setup after inferior is running. This
2101 needs to happen after we have attached to the inferior and it
2102 is stopped for the first time, but before we access any
2103 inferior registers. */
2104 proc = find_process_pid (pid_of (thread));
2105 if (proc->priv->new_inferior)
2106 {
2107 struct thread_info *saved_thread;
2108
2109 saved_thread = current_thread;
2110 current_thread = thread;
2111
2112 the_low_target.arch_setup ();
2113
2114 current_thread = saved_thread;
2115
2116 proc->priv->new_inferior = 0;
2117 }
2118 }
2119
2120 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2121 {
2122 struct process_info *proc = find_process_pid (pid_of (thread));
2123 int options = linux_low_ptrace_options (proc->attached);
2124
2125 linux_enable_event_reporting (lwpid, options);
2126 child->must_set_ptrace_flags = 0;
2127 }
2128
2129 /* Be careful to not overwrite stop_pc until
2130 check_stopped_by_breakpoint is called. */
2131 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2132 && linux_is_extended_waitstatus (wstat))
2133 {
2134 child->stop_pc = get_pc (child);
2135 if (handle_extended_wait (child, wstat))
2136 {
2137 /* The event has been handled, so just return without
2138 reporting it. */
2139 return NULL;
2140 }
2141 }
2142
2143 /* Check first whether this was a SW/HW breakpoint before checking
2144 watchpoints, because at least s390 can't tell the data address of
2145 hardware watchpoint hits, and returns stopped-by-watchpoint as
2146 long as there's a watchpoint set. */
2147 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2148 {
2149 if (check_stopped_by_breakpoint (child))
2150 have_stop_pc = 1;
2151 }
2152
2153 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2154 or hardware watchpoint. Check which is which if we got
2155 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2156 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2157 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2158 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2159 check_stopped_by_watchpoint (child);
2160
2161 if (!have_stop_pc)
2162 child->stop_pc = get_pc (child);
2163
2164 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2165 && child->stop_expected)
2166 {
2167 if (debug_threads)
2168 debug_printf ("Expected stop.\n");
2169 child->stop_expected = 0;
2170
2171 if (thread->last_resume_kind == resume_stop)
2172 {
2173 /* We want to report the stop to the core. Treat the
2174 SIGSTOP as a normal event. */
2175 if (debug_threads)
2176 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2177 target_pid_to_str (ptid_of (thread)));
2178 }
2179 else if (stopping_threads != NOT_STOPPING_THREADS)
2180 {
2181 /* Stopping threads. We don't want this SIGSTOP to end up
2182 pending. */
2183 if (debug_threads)
2184 debug_printf ("LLW: SIGSTOP caught for %s "
2185 "while stopping threads.\n",
2186 target_pid_to_str (ptid_of (thread)));
2187 return NULL;
2188 }
2189 else
2190 {
2191 /* This is a delayed SIGSTOP. Filter out the event. */
2192 if (debug_threads)
2193 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2194 child->stepping ? "step" : "continue",
2195 target_pid_to_str (ptid_of (thread)));
2196
2197 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2198 return NULL;
2199 }
2200 }
2201
2202 child->status_pending_p = 1;
2203 child->status_pending = wstat;
2204 return child;
2205 }
2206
2207 /* Resume LWPs that are currently stopped without any pending status
2208 to report, but are resumed from the core's perspective. */
2209
2210 static void
2211 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2212 {
2213 struct thread_info *thread = (struct thread_info *) entry;
2214 struct lwp_info *lp = get_thread_lwp (thread);
2215
2216 if (lp->stopped
2217 && !lp->status_pending_p
2218 && thread->last_resume_kind != resume_stop
2219 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2220 {
2221 int step = thread->last_resume_kind == resume_step;
2222
2223 if (debug_threads)
2224 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2225 target_pid_to_str (ptid_of (thread)),
2226 paddress (lp->stop_pc),
2227 step);
2228
2229 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2230 }
2231 }
2232
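/* Editorial usage note for linux_wait_for_event_filtered below: the
   two ptids decouple "who do we wait on" from "whose events may we
   report".  The callers in this file use:

     linux_wait_for_event_filtered (ptid, ptid, &w, options)
       -- via linux_wait_for_event: wait on PTID and report PTID's
          events;

     linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
                                    &w, __WALL)
       -- wait_for_sigstop: wait on everything, report nothing, so
          every event is left pending for a later report.  */
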
2233 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2234 match FILTER_PTID (leaving others pending). The PTIDs can be:
2235 minus_one_ptid, to specify any child; a pid PTID, specifying all
2236 lwps of a thread group; or a PTID representing a single lwp. Store
2237 the stop status through the status pointer WSTATP. OPTIONS is
2238 passed to the waitpid call. Return 0 if no event was found and
2239 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2240 were found. Return the PID of the stopped child otherwise. */
2241
2242 static int
2243 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2244 int *wstatp, int options)
2245 {
2246 struct thread_info *event_thread;
2247 struct lwp_info *event_child, *requested_child;
2248 sigset_t block_mask, prev_mask;
2249
2250 retry:
2251 /* N.B. event_thread points to the thread_info struct that contains
2252 event_child. Keep them in sync. */
2253 event_thread = NULL;
2254 event_child = NULL;
2255 requested_child = NULL;
2256
2257 /* Check for a lwp with a pending status. */
2258
2259 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2260 {
2261 event_thread = (struct thread_info *)
2262 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2263 if (event_thread != NULL)
2264 event_child = get_thread_lwp (event_thread);
2265 if (debug_threads && event_thread)
2266 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2267 }
2268 else if (!ptid_equal (filter_ptid, null_ptid))
2269 {
2270 requested_child = find_lwp_pid (filter_ptid);
2271
2272 if (stopping_threads == NOT_STOPPING_THREADS
2273 && requested_child->status_pending_p
2274 && requested_child->collecting_fast_tracepoint)
2275 {
2276 enqueue_one_deferred_signal (requested_child,
2277 &requested_child->status_pending);
2278 requested_child->status_pending_p = 0;
2279 requested_child->status_pending = 0;
2280 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2281 }
2282
2283 if (requested_child->suspended
2284 && requested_child->status_pending_p)
2285 {
2286 internal_error (__FILE__, __LINE__,
2287 "requesting an event out of a"
2288 " suspended child?");
2289 }
2290
2291 if (requested_child->status_pending_p)
2292 {
2293 event_child = requested_child;
2294 event_thread = get_lwp_thread (event_child);
2295 }
2296 }
2297
2298 if (event_child != NULL)
2299 {
2300 if (debug_threads)
2301 debug_printf ("Got an event from pending child %ld (%04x)\n",
2302 lwpid_of (event_thread), event_child->status_pending);
2303 *wstatp = event_child->status_pending;
2304 event_child->status_pending_p = 0;
2305 event_child->status_pending = 0;
2306 current_thread = event_thread;
2307 return lwpid_of (event_thread);
2308 }
2309
2310 /* But if we don't find a pending event, we'll have to wait.
2311
2312 We only enter this loop if no process has a pending wait status.
2313 Thus any action taken in response to a wait status inside this
2314 loop is responding as soon as we detect the status, not after any
2315 pending events. */
2316
2317 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2318 all signals while here. */
2319 sigfillset (&block_mask);
2320 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2321
2322 /* Always pull all events out of the kernel. We'll randomly select
2323 an event LWP out of all that have events, to prevent
2324 starvation. */
2325 while (event_child == NULL)
2326 {
2327 pid_t ret = 0;
2328
2329 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2330 quirks:
2331
2332 - If the thread group leader exits while other threads in the
2333 thread group still exist, waitpid(TGID, ...) hangs. That
2334 waitpid won't return an exit status until the other threads
2335 in the group are reaped.
2336
2337 - When a non-leader thread execs, that thread just vanishes
2338 without reporting an exit (so we'd hang if we waited for it
2339 explicitly in that case). The exec event is reported to
2340 the TGID pid (although we don't currently enable exec
2341 events). */
2342 errno = 0;
2343 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2344
2345 if (debug_threads)
2346 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2347 ret, errno ? strerror (errno) : "ERRNO-OK");
2348
2349 if (ret > 0)
2350 {
2351 if (debug_threads)
2352 {
2353 debug_printf ("LLW: waitpid %ld received %s\n",
2354 (long) ret, status_to_str (*wstatp));
2355 }
2356
2357 /* Filter all events. IOW, leave all events pending. We'll
2358 randomly select an event LWP out of all that have events
2359 below. */
2360 linux_low_filter_event (ret, *wstatp);
2361 /* Retry until nothing comes out of waitpid. A single
2362 SIGCHLD can indicate more than one child stopped. */
2363 continue;
2364 }
2365
2366 /* Now that we've pulled all events out of the kernel, resume
2367 LWPs that don't have an interesting event to report. */
2368 if (stopping_threads == NOT_STOPPING_THREADS)
2369 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2370
2371 /* ... and find an LWP with a status to report to the core, if
2372 any. */
2373 event_thread = (struct thread_info *)
2374 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2375 if (event_thread != NULL)
2376 {
2377 event_child = get_thread_lwp (event_thread);
2378 *wstatp = event_child->status_pending;
2379 event_child->status_pending_p = 0;
2380 event_child->status_pending = 0;
2381 break;
2382 }
2383
2384 /* Check for zombie thread group leaders. Those can't be reaped
2385 until all other threads in the thread group are. */
2386 check_zombie_leaders ();
2387
2388 /* If there are no resumed children left in the set of LWPs we
2389 want to wait for, bail. We can't just block in
2390 waitpid/sigsuspend, because lwps might have been left stopped
2391 in trace-stop state, and we'd be stuck forever waiting for
2392 their status to change (which would only happen if we resumed
2393 them). Even if WNOHANG is set, this return code is preferred
2394 over 0 (below), as it is more detailed. */
2395 if ((find_inferior (&all_threads,
2396 not_stopped_callback,
2397 &wait_ptid) == NULL))
2398 {
2399 if (debug_threads)
2400 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2401 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2402 return -1;
2403 }
2404
2405 /* No interesting event to report to the caller. */
2406 if ((options & WNOHANG))
2407 {
2408 if (debug_threads)
2409 debug_printf ("WNOHANG set, no event found\n");
2410
2411 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2412 return 0;
2413 }
2414
2415 /* Block until we get an event reported with SIGCHLD. */
2416 if (debug_threads)
2417 debug_printf ("sigsuspend'ing\n");
2418
2419 sigsuspend (&prev_mask);
2420 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2421 goto retry;
2422 }
2423
2424 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2425
2426 current_thread = event_thread;
2427
2428 /* Check for thread exit. */
2429 if (! WIFSTOPPED (*wstatp))
2430 {
2431 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2432
2433 if (debug_threads)
2434 debug_printf ("LWP %d is the last lwp of process. "
2435 "Process %ld exiting.\n",
2436 pid_of (event_thread), lwpid_of (event_thread));
2437 return lwpid_of (event_thread);
2438 }
2439
2440 return lwpid_of (event_thread);
2441 }
2442
2443 /* Wait for an event from child(ren) PTID. PTIDs can be:
2444 minus_one_ptid, to specify any child; a pid PTID, specifying all
2445 lwps of a thread group; or a PTID representing a single lwp. Store
2446 the stop status through the status pointer WSTATP. OPTIONS is
2447 passed to the waitpid call. Return 0 if no event was found and
2448 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2449 were found. Return the PID of the stopped child otherwise. */
2450
2451 static int
2452 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2453 {
2454 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2455 }
2456
2457 /* Count the LWPs that have had events. */
2458
2459 static int
2460 count_events_callback (struct inferior_list_entry *entry, void *data)
2461 {
2462 struct thread_info *thread = (struct thread_info *) entry;
2463 struct lwp_info *lp = get_thread_lwp (thread);
2464 int *count = data;
2465
2466 gdb_assert (count != NULL);
2467
2468 /* Count only resumed LWPs that have an event pending. */
2469 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2470 && lp->status_pending_p)
2471 (*count)++;
2472
2473 return 0;
2474 }
2475
2476 /* Select the LWP (if any) that is currently being single-stepped. */
2477
2478 static int
2479 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2480 {
2481 struct thread_info *thread = (struct thread_info *) entry;
2482 struct lwp_info *lp = get_thread_lwp (thread);
2483
2484 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2485 && thread->last_resume_kind == resume_step
2486 && lp->status_pending_p)
2487 return 1;
2488 else
2489 return 0;
2490 }
2491
2492 /* Select the Nth LWP that has had an event. */
2493
2494 static int
2495 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2496 {
2497 struct thread_info *thread = (struct thread_info *) entry;
2498 struct lwp_info *lp = get_thread_lwp (thread);
2499 int *selector = data;
2500
2501 gdb_assert (selector != NULL);
2502
2503 /* Select only resumed LWPs that have an event pending. */
2504 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2505 && lp->status_pending_p)
2506 if ((*selector)-- == 0)
2507 return 1;
2508
2509 return 0;
2510 }
2511
2512 /* Select one LWP out of those that have events pending. */
2513
2514 static void
2515 select_event_lwp (struct lwp_info **orig_lp)
2516 {
2517 int num_events = 0;
2518 int random_selector;
2519 struct thread_info *event_thread = NULL;
2520
2521 /* In all-stop, give preference to the LWP that is being
2522 single-stepped. There will be at most one, and it's the LWP that
2523 the core is most interested in. If we didn't do this, then we'd
2524 have to handle pending step SIGTRAPs somehow in case the core
2525 later continues the previously-stepped thread, otherwise we'd
2526 report the pending SIGTRAP, and the core, not having stepped the
2527 thread, wouldn't understand what the trap was for, and therefore
2528 would report it to the user as a random signal. */
2529 if (!non_stop)
2530 {
2531 event_thread
2532 = (struct thread_info *) find_inferior (&all_threads,
2533 select_singlestep_lwp_callback,
2534 NULL);
2535 if (event_thread != NULL)
2536 {
2537 if (debug_threads)
2538 debug_printf ("SEL: Select single-step %s\n",
2539 target_pid_to_str (ptid_of (event_thread)));
2540 }
2541 }
2542 if (event_thread == NULL)
2543 {
2544 /* No single-stepping LWP. Select one at random, out of those
2545 which have had events. */
2546
2547 /* First see how many events we have. */
2548 find_inferior (&all_threads, count_events_callback, &num_events);
2549 gdb_assert (num_events > 0);
2550
2551 /* Now randomly pick a LWP out of those that have had
2552 events. */
2553 random_selector = (int)
2554 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2555
2556 if (debug_threads && num_events > 1)
2557 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2558 num_events, random_selector);
2559
2560 event_thread
2561 = (struct thread_info *) find_inferior (&all_threads,
2562 select_event_lwp_callback,
2563 &random_selector);
2564 }
2565
2566 if (event_thread != NULL)
2567 {
2568 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2569
2570 /* Switch the event LWP. */
2571 *orig_lp = event_lp;
2572 }
2573 }
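
/* Editorial note on the random selector above: scaling rand () by
   num_events / (RAND_MAX + 1.0) maps [0, RAND_MAX] onto the
   integers [0, num_events).  E.g. with num_events == 4, rand () == 0
   selects event #0, and even rand () == RAND_MAX selects #3, since
   4.0 * RAND_MAX / (RAND_MAX + 1.0) is just under 4.  This is the
   classic alternative to rand () % num_events, which can be skewed
   by the poor low-order bits of some rand implementations.  */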
2574
2575 /* Decrement the suspend count of an LWP. */
2576
2577 static int
2578 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2579 {
2580 struct thread_info *thread = (struct thread_info *) entry;
2581 struct lwp_info *lwp = get_thread_lwp (thread);
2582
2583 /* Ignore EXCEPT. */
2584 if (lwp == except)
2585 return 0;
2586
2587 lwp->suspended--;
2588
2589 gdb_assert (lwp->suspended >= 0);
2590 return 0;
2591 }
2592
2593 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2594 non-NULL. */
2595
2596 static void
2597 unsuspend_all_lwps (struct lwp_info *except)
2598 {
2599 find_inferior (&all_threads, unsuspend_one_lwp, except);
2600 }
2601
2602 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2603 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2604 void *data);
2605 static int lwp_running (struct inferior_list_entry *entry, void *data);
2606 static ptid_t linux_wait_1 (ptid_t ptid,
2607 struct target_waitstatus *ourstatus,
2608 int target_options);
2609
2610 /* Stabilize threads (move out of jump pads).
2611
2612 If a thread is midway through collecting a fast tracepoint, we
2613 need to finish the collection and move it out of the jump pad
2614 before reporting the signal.
2615
2616 This avoids recursion while collecting (when a signal arrives
2617 midway, and the signal handler itself collects), which would trash
2618 the trace buffer. In case the user set a breakpoint in a signal
2619 handler, this avoids the backtrace showing the jump pad, etc.
2620 Most importantly, there are certain things we can't do safely if
2621 threads are stopped in a jump pad (or in its callees). For
2622 example:
2623
2624 - starting a new trace run. A thread still collecting the
2625 previous run could trash the trace buffer when resumed. The
2626 trace buffer control structures would have been reset but the
2627 thread would have no way to tell. The thread could even be
2628 midway through a memcpy into the buffer, which when resumed
2629 would clobber the trace buffer that had been set up for the new
2630 run.
2631
2632 - we can't rewrite/reuse the jump pads for new tracepoints
2633 safely. Say you do tstart while a thread is stopped midway
2634 through a collection. When the thread is later resumed, it
2635 finishes the collection, and returns to the jump pad, to execute
2636 the original instruction that was under the tracepoint jump at
2637 the time the older run had been started. If the jump pad had
2638 since been rewritten for something else in the new run, the thread would now execute the wrong / random instructions. */
2639
2640 static void
2641 linux_stabilize_threads (void)
2642 {
2643 struct thread_info *saved_thread;
2644 struct thread_info *thread_stuck;
2645
2646 thread_stuck
2647 = (struct thread_info *) find_inferior (&all_threads,
2648 stuck_in_jump_pad_callback,
2649 NULL);
2650 if (thread_stuck != NULL)
2651 {
2652 if (debug_threads)
2653 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2654 lwpid_of (thread_stuck));
2655 return;
2656 }
2657
2658 saved_thread = current_thread;
2659
2660 stabilizing_threads = 1;
2661
2662 /* Kick 'em all. */
2663 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2664
2665 /* Loop until all are stopped out of the jump pads. */
2666 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2667 {
2668 struct target_waitstatus ourstatus;
2669 struct lwp_info *lwp;
2670 int wstat;
2671
2672 /* Note that we go through the full wait event loop. While
2673 moving threads out of the jump pad, we need to be able to step
2674 over internal breakpoints and such. */
2675 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2676
2677 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2678 {
2679 lwp = get_thread_lwp (current_thread);
2680
2681 /* Lock it. */
2682 lwp->suspended++;
2683
2684 if (ourstatus.value.sig != GDB_SIGNAL_0
2685 || current_thread->last_resume_kind == resume_stop)
2686 {
2687 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2688 enqueue_one_deferred_signal (lwp, &wstat);
2689 }
2690 }
2691 }
2692
2693 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2694
2695 stabilizing_threads = 0;
2696
2697 current_thread = saved_thread;
2698
2699 if (debug_threads)
2700 {
2701 thread_stuck
2702 = (struct thread_info *) find_inferior (&all_threads,
2703 stuck_in_jump_pad_callback,
2704 NULL);
2705 if (thread_stuck != NULL)
2706 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2707 lwpid_of (thread_stuck));
2708 }
2709 }
2710
2711 static void async_file_mark (void);
2712
2713 /* Convenience function that is called when the kernel reports an
2714 event that is not passed out to GDB. */
2715
2716 static ptid_t
2717 ignore_event (struct target_waitstatus *ourstatus)
2718 {
2719 /* If we got an event, there may still be others, as a single
2720 SIGCHLD can indicate more than one child stopped. This forces
2721 another target_wait call. */
2722 async_file_mark ();
2723
2724 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2725 return null_ptid;
2726 }
2727
2728 /* Return non-zero if WAITSTATUS reflects an extended Linux
2729 event. Otherwise, return zero. */
2730
2731 static int
2732 extended_event_reported (const struct target_waitstatus *waitstatus)
2733 {
2734 if (waitstatus == NULL)
2735 return 0;
2736
2737 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2738 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2739 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2740 }
2741
2742 /* Wait for the process and return its status. */
2743
2744 static ptid_t
2745 linux_wait_1 (ptid_t ptid,
2746 struct target_waitstatus *ourstatus, int target_options)
2747 {
2748 int w;
2749 struct lwp_info *event_child;
2750 int options;
2751 int pid;
2752 int step_over_finished;
2753 int bp_explains_trap;
2754 int maybe_internal_trap;
2755 int report_to_gdb;
2756 int trace_event;
2757 int in_step_range;
2758
2759 if (debug_threads)
2760 {
2761 debug_enter ();
2762 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2763 }
2764
2765 /* Translate generic target options into linux options. */
2766 options = __WALL;
2767 if (target_options & TARGET_WNOHANG)
2768 options |= WNOHANG;
2769
2770 bp_explains_trap = 0;
2771 trace_event = 0;
2772 in_step_range = 0;
2773 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2774
2775 if (ptid_equal (step_over_bkpt, null_ptid))
2776 pid = linux_wait_for_event (ptid, &w, options);
2777 else
2778 {
2779 if (debug_threads)
2780 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2781 target_pid_to_str (step_over_bkpt));
2782 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2783 }
2784
2785 if (pid == 0)
2786 {
2787 gdb_assert (target_options & TARGET_WNOHANG);
2788
2789 if (debug_threads)
2790 {
2791 debug_printf ("linux_wait_1 ret = null_ptid, "
2792 "TARGET_WAITKIND_IGNORE\n");
2793 debug_exit ();
2794 }
2795
2796 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2797 return null_ptid;
2798 }
2799 else if (pid == -1)
2800 {
2801 if (debug_threads)
2802 {
2803 debug_printf ("linux_wait_1 ret = null_ptid, "
2804 "TARGET_WAITKIND_NO_RESUMED\n");
2805 debug_exit ();
2806 }
2807
2808 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2809 return null_ptid;
2810 }
2811
2812 event_child = get_thread_lwp (current_thread);
2813
2814 /* linux_wait_for_event only returns an exit status for the last
2815 child of a process. Report it. */
2816 if (WIFEXITED (w) || WIFSIGNALED (w))
2817 {
2818 if (WIFEXITED (w))
2819 {
2820 ourstatus->kind = TARGET_WAITKIND_EXITED;
2821 ourstatus->value.integer = WEXITSTATUS (w);
2822
2823 if (debug_threads)
2824 {
2825 debug_printf ("linux_wait_1 ret = %s, exited with "
2826 "retcode %d\n",
2827 target_pid_to_str (ptid_of (current_thread)),
2828 WEXITSTATUS (w));
2829 debug_exit ();
2830 }
2831 }
2832 else
2833 {
2834 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2835 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2836
2837 if (debug_threads)
2838 {
2839 debug_printf ("linux_wait_1 ret = %s, terminated with "
2840 "signal %d\n",
2841 target_pid_to_str (ptid_of (current_thread)),
2842 WTERMSIG (w));
2843 debug_exit ();
2844 }
2845 }
2846
2847 return ptid_of (current_thread);
2848 }
2849
2850 /* If step-over executes a breakpoint instruction, it means a
2851 gdb/gdbserver breakpoint had been planted on top of a permanent
2852 breakpoint. The PC has been adjusted by
2853 check_stopped_by_breakpoint to point at the breakpoint address.
2854 Advance the PC manually past the breakpoint, otherwise the
2855 program would keep trapping the permanent breakpoint forever. */
2856 if (!ptid_equal (step_over_bkpt, null_ptid)
2857 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2858 {
2859 unsigned int increment_pc = the_low_target.breakpoint_len;
2860
2861 if (debug_threads)
2862 {
2863 debug_printf ("step-over for %s executed software breakpoint\n",
2864 target_pid_to_str (ptid_of (current_thread)));
2865 }
2866
2867 if (increment_pc != 0)
2868 {
2869 struct regcache *regcache
2870 = get_thread_regcache (current_thread, 1);
2871
2872 event_child->stop_pc += increment_pc;
2873 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2874
2875 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2876 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2877 }
2878 }
2879
2880 /* If this event was not handled before, and is not a SIGTRAP, we
2881 report it. SIGILL and SIGSEGV are also treated as traps in case
2882 a breakpoint is inserted at the current PC. If this target does
2883 not support internal breakpoints at all, we also report the
2884 SIGTRAP without further processing; it's of no concern to us. */
2885 maybe_internal_trap
2886 = (supports_breakpoints ()
2887 && (WSTOPSIG (w) == SIGTRAP
2888 || ((WSTOPSIG (w) == SIGILL
2889 || WSTOPSIG (w) == SIGSEGV)
2890 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2891
2892 if (maybe_internal_trap)
2893 {
2894 /* Handle anything that requires bookkeeping before deciding to
2895 report the event or continue waiting. */
2896
2897 /* First check if we can explain the SIGTRAP with an internal
2898 breakpoint, or if we should possibly report the event to GDB.
2899 Do this before anything that may remove or insert a
2900 breakpoint. */
2901 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2902
2903 /* We have a SIGTRAP, possibly a step-over dance has just
2904 finished. If so, tweak the state machine accordingly,
2905 reinsert breakpoints and delete any reinsert (software
2906 single-step) breakpoints. */
2907 step_over_finished = finish_step_over (event_child);
2908
2909 /* Now invoke the callbacks of any internal breakpoints there. */
2910 check_breakpoints (event_child->stop_pc);
2911
2912 /* Handle tracepoint data collecting. This may overflow the
2913 trace buffer, and cause a tracing stop, removing
2914 breakpoints. */
2915 trace_event = handle_tracepoints (event_child);
2916
2917 if (bp_explains_trap)
2918 {
2919 /* If we stepped or ran into an internal breakpoint, we've
2920 already handled it. So next time we resume (from this
2921 PC), we should step over it. */
2922 if (debug_threads)
2923 debug_printf ("Hit a gdbserver breakpoint.\n");
2924
2925 if (breakpoint_here (event_child->stop_pc))
2926 event_child->need_step_over = 1;
2927 }
2928 }
2929 else
2930 {
2931 /* We have some other signal, possibly a step-over dance was in
2932 progress, and it should be cancelled too. */
2933 step_over_finished = finish_step_over (event_child);
2934 }
2935
2936 /* We have all the data we need. Either report the event to GDB, or
2937 resume threads and keep waiting for more. */
2938
2939 /* If we're collecting a fast tracepoint, finish the collection and
2940 move out of the jump pad before delivering a signal. See
2941 linux_stabilize_threads. */
2942
2943 if (WIFSTOPPED (w)
2944 && WSTOPSIG (w) != SIGTRAP
2945 && supports_fast_tracepoints ()
2946 && agent_loaded_p ())
2947 {
2948 if (debug_threads)
2949 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2950 "to defer or adjust it.\n",
2951 WSTOPSIG (w), lwpid_of (current_thread));
2952
2953 /* Allow debugging the jump pad itself. */
2954 if (current_thread->last_resume_kind != resume_step
2955 && maybe_move_out_of_jump_pad (event_child, &w))
2956 {
2957 enqueue_one_deferred_signal (event_child, &w);
2958
2959 if (debug_threads)
2960 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2961 WSTOPSIG (w), lwpid_of (current_thread));
2962
2963 linux_resume_one_lwp (event_child, 0, 0, NULL);
2964
2965 return ignore_event (ourstatus);
2966 }
2967 }
2968
2969 if (event_child->collecting_fast_tracepoint)
2970 {
2971 if (debug_threads)
2972 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2973 "Check if we're already there.\n",
2974 lwpid_of (current_thread),
2975 event_child->collecting_fast_tracepoint);
2976
2977 trace_event = 1;
2978
2979 event_child->collecting_fast_tracepoint
2980 = linux_fast_tracepoint_collecting (event_child, NULL);
2981
2982 if (event_child->collecting_fast_tracepoint != 1)
2983 {
2984 /* No longer need this breakpoint. */
2985 if (event_child->exit_jump_pad_bkpt != NULL)
2986 {
2987 if (debug_threads)
2988 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2989 "stopping all threads momentarily.\n");
2990
2991 /* Other running threads could hit this breakpoint.
2992 We don't handle moribund locations like GDB does,
2993 instead we always pause all threads when removing
2994 breakpoints, so that any step-over or
2995 decr_pc_after_break adjustment is always taken
2996 care of while the breakpoint is still
2997 inserted. */
2998 stop_all_lwps (1, event_child);
2999
3000 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3001 event_child->exit_jump_pad_bkpt = NULL;
3002
3003 unstop_all_lwps (1, event_child);
3004
3005 gdb_assert (event_child->suspended >= 0);
3006 }
3007 }
3008
3009 if (event_child->collecting_fast_tracepoint == 0)
3010 {
3011 if (debug_threads)
3012 debug_printf ("fast tracepoint finished "
3013 "collecting successfully.\n");
3014
3015 /* We may have a deferred signal to report. */
3016 if (dequeue_one_deferred_signal (event_child, &w))
3017 {
3018 if (debug_threads)
3019 debug_printf ("dequeued one signal.\n");
3020 }
3021 else
3022 {
3023 if (debug_threads)
3024 debug_printf ("no deferred signals.\n");
3025
3026 if (stabilizing_threads)
3027 {
3028 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3029 ourstatus->value.sig = GDB_SIGNAL_0;
3030
3031 if (debug_threads)
3032 {
3033 debug_printf ("linux_wait_1 ret = %s, stopped "
3034 "while stabilizing threads\n",
3035 target_pid_to_str (ptid_of (current_thread)));
3036 debug_exit ();
3037 }
3038
3039 return ptid_of (current_thread);
3040 }
3041 }
3042 }
3043 }
3044
3045 /* Check whether GDB would be interested in this event. */
3046
3047 /* If GDB is not interested in this signal, don't stop other
3048 threads, and don't report it to GDB. Just resume the inferior
3049 right away. We do this for threading-related signals as well as
3050 any that GDB specifically requested we ignore. But never ignore
3051 SIGSTOP if we sent it ourselves, and do not ignore signals when
3052 stepping - they may require special handling to skip the signal
3053 handler. Also never ignore signals that could be caused by a
3054 breakpoint. */
3055 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3056 thread library? */
3057 if (WIFSTOPPED (w)
3058 && current_thread->last_resume_kind != resume_step
3059 && (
3060 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3061 (current_process ()->priv->thread_db != NULL
3062 && (WSTOPSIG (w) == __SIGRTMIN
3063 || WSTOPSIG (w) == __SIGRTMIN + 1))
3064 ||
3065 #endif
3066 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3067 && !(WSTOPSIG (w) == SIGSTOP
3068 && current_thread->last_resume_kind == resume_stop)
3069 && !linux_wstatus_maybe_breakpoint (w))))
3070 {
3071 siginfo_t info, *info_p;
3072
3073 if (debug_threads)
3074 debug_printf ("Ignored signal %d for LWP %ld.\n",
3075 WSTOPSIG (w), lwpid_of (current_thread));
3076
3077 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3078 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3079 info_p = &info;
3080 else
3081 info_p = NULL;
3082 linux_resume_one_lwp (event_child, event_child->stepping,
3083 WSTOPSIG (w), info_p);
3084 return ignore_event (ourstatus);
3085 }
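
/* Editorial note: the __SIGRTMIN and __SIGRTMIN + 1 check above
   covers the two lowest real-time signals, which glibc's NPTL
   reserves for internal use (thread cancellation and setxid
   propagation).  They are pure noise to the user, hence never
   reported while libthread_db is in use.  */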
3086
3087 /* Note that all addresses are always "out of the step range" when
3088 there's no range to begin with. */
3089 in_step_range = lwp_in_step_range (event_child);
3090
3091 /* If GDB wanted this thread to single step, and the thread is out
3092 of the step range, we always want to report the SIGTRAP, and let
3093 GDB handle it. Watchpoints should always be reported. So should
3094 signals we can't explain. A SIGTRAP we can't explain could be a
3095 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3096 we do, we'd be able to handle GDB breakpoints on top of internal
3097 breakpoints, by handling the internal breakpoint and still
3098 reporting the event to GDB. If we don't, we're out of luck; GDB
3099 won't see the breakpoint hit. */
3100 report_to_gdb = (!maybe_internal_trap
3101 || (current_thread->last_resume_kind == resume_step
3102 && !in_step_range)
3103 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3104 || (!step_over_finished && !in_step_range
3105 && !bp_explains_trap && !trace_event)
3106 || (gdb_breakpoint_here (event_child->stop_pc)
3107 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3108 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3109 || extended_event_reported (&event_child->waitstatus));
3110
3111 run_breakpoint_commands (event_child->stop_pc);
3112
3113 /* We found no reason GDB would want us to stop. We either hit one
3114 of our own breakpoints, or finished an internal step GDB
3115 shouldn't know about. */
3116 if (!report_to_gdb)
3117 {
3118 if (debug_threads)
3119 {
3120 if (bp_explains_trap)
3121 debug_printf ("Hit a gdbserver breakpoint.\n");
3122 if (step_over_finished)
3123 debug_printf ("Step-over finished.\n");
3124 if (trace_event)
3125 debug_printf ("Tracepoint event.\n");
3126 if (lwp_in_step_range (event_child))
3127 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3128 paddress (event_child->stop_pc),
3129 paddress (event_child->step_range_start),
3130 paddress (event_child->step_range_end));
3131 if (extended_event_reported (&event_child->waitstatus))
3132 {
3133 char *str = target_waitstatus_to_string (&event_child->waitstatus);
3134 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3135 lwpid_of (get_lwp_thread (event_child)), str);
3136 xfree (str);
3137 }
3138 }
3139
3140 /* We're not reporting this breakpoint to GDB, so apply the
3141 decr_pc_after_break adjustment to the inferior's regcache
3142 ourselves. */
3143
3144 if (the_low_target.set_pc != NULL)
3145 {
3146 struct regcache *regcache
3147 = get_thread_regcache (current_thread, 1);
3148 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3149 }
3150
3151 /* We may have finished stepping over a breakpoint. If so,
3152 we've stopped and suspended all LWPs momentarily except the
3153 stepping one. This is where we resume them all again. We're
3154 going to keep waiting, so use proceed, which handles stepping
3155 over the next breakpoint. */
3156 if (debug_threads)
3157 debug_printf ("proceeding all threads.\n");
3158
3159 if (step_over_finished)
3160 unsuspend_all_lwps (event_child);
3161
3162 proceed_all_lwps ();
3163 return ignore_event (ourstatus);
3164 }
3165
3166 if (debug_threads)
3167 {
3168 if (current_thread->last_resume_kind == resume_step)
3169 {
3170 if (event_child->step_range_start == event_child->step_range_end)
3171 debug_printf ("GDB wanted to single-step, reporting event.\n");
3172 else if (!lwp_in_step_range (event_child))
3173 debug_printf ("Out of step range, reporting event.\n");
3174 }
3175 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3176 debug_printf ("Stopped by watchpoint.\n");
3177 else if (gdb_breakpoint_here (event_child->stop_pc))
3178 debug_printf ("Stopped by GDB breakpoint.\n");
3179 debug_printf ("Hit a non-gdbserver trap event.\n");
3181 }
3182
3183 /* Alright, we're going to report a stop. */
3184
3185 if (!stabilizing_threads)
3186 {
3187 /* In all-stop, stop all threads. */
3188 if (!non_stop)
3189 stop_all_lwps (0, NULL);
3190
3191 /* If we're not waiting for a specific LWP, choose an event LWP
3192 from among those that have had events. Giving equal priority
3193 to all LWPs that have had events helps prevent
3194 starvation. */
3195 if (ptid_equal (ptid, minus_one_ptid))
3196 {
3197 event_child->status_pending_p = 1;
3198 event_child->status_pending = w;
3199
3200 select_event_lwp (&event_child);
3201
3202 /* current_thread and event_child must stay in sync. */
3203 current_thread = get_lwp_thread (event_child);
3204
3205 event_child->status_pending_p = 0;
3206 w = event_child->status_pending;
3207 }
3208
3209 if (step_over_finished)
3210 {
3211 if (!non_stop)
3212 {
3213 /* If we were doing a step-over, all other threads but
3214 the stepping one had been paused in start_step_over,
3215 with their suspend counts incremented. We don't want
3216 to do a full unstop/unpause, because we're in
3217 all-stop mode (so we want threads stopped), but we
3218 still need to unsuspend the other threads, to
3219 decrement their `suspended' count back. */
3220 unsuspend_all_lwps (event_child);
3221 }
3222 else
3223 {
3224 /* If we just finished a step-over, then all threads had
3225 been momentarily paused. In all-stop, that's fine,
3226 we want threads stopped by now anyway. In non-stop,
3227 we need to re-resume threads that GDB wanted to be
3228 running. */
3229 unstop_all_lwps (1, event_child);
3230 }
3231 }
3232
3233 /* Stabilize threads (move out of jump pads). */
3234 if (!non_stop)
3235 stabilize_threads ();
3236 }
3237 else
3238 {
3239 /* If we just finished a step-over, then all threads had been
3240 momentarily paused. In all-stop, that's fine, we want
3241 threads stopped by now anyway. In non-stop, we need to
3242 re-resume threads that GDB wanted to be running. */
3243 if (step_over_finished)
3244 unstop_all_lwps (1, event_child);
3245 }
3246
3247 if (extended_event_reported (&event_child->waitstatus))
3248 {
3249 /* If the reported event is a fork, vfork or exec, let GDB know. */
3250 ourstatus->kind = event_child->waitstatus.kind;
3251 ourstatus->value = event_child->waitstatus.value;
3252
3253 /* Clear the event lwp's waitstatus since we handled it already. */
3254 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3255 }
3256 else
3257 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3258
3259 /* Now that we've selected our final event LWP, un-adjust its PC if
3260 it was a software breakpoint, and the client doesn't know we can
3261 adjust the breakpoint ourselves. */
3262 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3263 && !swbreak_feature)
3264 {
3265 int decr_pc = the_low_target.decr_pc_after_break;
3266
3267 if (decr_pc != 0)
3268 {
3269 struct regcache *regcache
3270 = get_thread_regcache (current_thread, 1);
3271 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3272 }
3273 }
3274
3275 if (current_thread->last_resume_kind == resume_stop
3276 && WSTOPSIG (w) == SIGSTOP)
3277 {
3278 /* A thread that has been requested to stop by GDB with vCont;t
3279 stopped cleanly, so report it as SIG0. The use of
3280 SIGSTOP is an implementation detail. */
3281 ourstatus->value.sig = GDB_SIGNAL_0;
3282 }
3283 else if (current_thread->last_resume_kind == resume_stop
3284 && WSTOPSIG (w) != SIGSTOP)
3285 {
3286 /* A thread that has been requested to stop by GDB with vCont;t,
3287 but it stopped for some other reason. */
3288 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3289 }
3290 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3291 {
3292 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3293 }
3294
3295 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3296
3297 if (debug_threads)
3298 {
3299 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3300 target_pid_to_str (ptid_of (current_thread)),
3301 ourstatus->kind, ourstatus->value.sig);
3302 debug_exit ();
3303 }
3304
3305 return ptid_of (current_thread);
3306 }
3307
3308 /* Get rid of any pending event in the pipe. */
3309 static void
3310 async_file_flush (void)
3311 {
3312 int ret;
3313 char buf;
3314
3315 do
3316 ret = read (linux_event_pipe[0], &buf, 1);
3317 while (ret >= 0 || (ret == -1 && errno == EINTR));
3318 }
3319
3320 /* Put something in the pipe, so the event loop wakes up. */
3321 static void
3322 async_file_mark (void)
3323 {
3324 int ret;
3325
3326 async_file_flush ();
3327
3328 do
3329 ret = write (linux_event_pipe[1], "+", 1);
3330 while (ret == 0 || (ret == -1 && errno == EINTR));
3331
3332 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3333 be awakened anyway. */
3334 }
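
/* Editorial note: the two helpers above are the classic self-pipe
   trick -- whoever has an event to post writes a byte, and the event
   loop polls the read end.  For the loops above to terminate (flush
   reads until failure; mark ignores EAGAIN), both ends of
   linux_event_pipe must be non-blocking.  A minimal sketch of the
   kind of setup this assumes, with O_NONBLOCK set on both ends so
   that flush/mark never wedge (the real setup lives in the
   async-enable path elsewhere in this file):

     #include <fcntl.h>
     #include <unistd.h>

     static int
     example_make_event_pipe (int fds[2])
     {
       if (pipe (fds) != 0)
         return -1;
       fcntl (fds[0], F_SETFL, O_NONBLOCK);
       fcntl (fds[1], F_SETFL, O_NONBLOCK);
       return 0;
     }
   */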
3335
3336 static ptid_t
3337 linux_wait (ptid_t ptid,
3338 struct target_waitstatus *ourstatus, int target_options)
3339 {
3340 ptid_t event_ptid;
3341
3342 /* Flush the async file first. */
3343 if (target_is_async_p ())
3344 async_file_flush ();
3345
3346 do
3347 {
3348 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3349 }
3350 while ((target_options & TARGET_WNOHANG) == 0
3351 && ptid_equal (event_ptid, null_ptid)
3352 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3353
3354 /* If at least one stop was reported, there may be more. A single
3355 SIGCHLD can signal more than one child stop. */
3356 if (target_is_async_p ()
3357 && (target_options & TARGET_WNOHANG) != 0
3358 && !ptid_equal (event_ptid, null_ptid))
3359 async_file_mark ();
3360
3361 return event_ptid;
3362 }
3363
3364 /* Send a signal to an LWP. */
3365
3366 static int
3367 kill_lwp (unsigned long lwpid, int signo)
3368 {
3369 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3370 fails, then we are not using nptl threads and we should be using kill. */
3371
3372 #ifdef __NR_tkill
3373 {
3374 static int tkill_failed;
3375
3376 if (!tkill_failed)
3377 {
3378 int ret;
3379
3380 errno = 0;
3381 ret = syscall (__NR_tkill, lwpid, signo);
3382 if (errno != ENOSYS)
3383 return ret;
3384 tkill_failed = 1;
3385 }
3386 }
3387 #endif
3388
3389 return kill (lwpid, signo);
3390 }
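
/* Editorial note: kill (pid, sig) addresses a whole process (thread
   group), so with NPTL the kernel may deliver the signal to an
   arbitrary thread in the group; tkill -- and its modern successor
   tgkill, which also takes the thread-group id to guard against tid
   reuse -- targets one specific thread.  Targeted delivery is
   exactly what the SIGSTOP-based stop-all machinery below needs.  */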
3391
3392 void
3393 linux_stop_lwp (struct lwp_info *lwp)
3394 {
3395 send_sigstop (lwp);
3396 }
3397
3398 static void
3399 send_sigstop (struct lwp_info *lwp)
3400 {
3401 int pid;
3402
3403 pid = lwpid_of (get_lwp_thread (lwp));
3404
3405 /* If we already have a pending stop signal for this process, don't
3406 send another. */
3407 if (lwp->stop_expected)
3408 {
3409 if (debug_threads)
3410 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3411
3412 return;
3413 }
3414
3415 if (debug_threads)
3416 debug_printf ("Sending sigstop to lwp %d\n", pid);
3417
3418 lwp->stop_expected = 1;
3419 kill_lwp (pid, SIGSTOP);
3420 }
3421
3422 static int
3423 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3424 {
3425 struct thread_info *thread = (struct thread_info *) entry;
3426 struct lwp_info *lwp = get_thread_lwp (thread);
3427
3428 /* Ignore EXCEPT. */
3429 if (lwp == except)
3430 return 0;
3431
3432 if (lwp->stopped)
3433 return 0;
3434
3435 send_sigstop (lwp);
3436 return 0;
3437 }
3438
3439 /* Increment the suspend count of an LWP, and stop it, if not stopped
3440 yet. */
3441 static int
3442 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3443 void *except)
3444 {
3445 struct thread_info *thread = (struct thread_info *) entry;
3446 struct lwp_info *lwp = get_thread_lwp (thread);
3447
3448 /* Ignore EXCEPT. */
3449 if (lwp == except)
3450 return 0;
3451
3452 lwp->suspended++;
3453
3454 return send_sigstop_callback (entry, except);
3455 }
3456
3457 static void
3458 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3459 {
3460 /* It's dead, really. */
3461 lwp->dead = 1;
3462
3463 /* Store the exit status for later. */
3464 lwp->status_pending_p = 1;
3465 lwp->status_pending = wstat;
3466
3467 /* Prevent trying to stop it. */
3468 lwp->stopped = 1;
3469
3470 /* No further stops are expected from a dead lwp. */
3471 lwp->stop_expected = 0;
3472 }
3473
3474 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3475
3476 static void
3477 wait_for_sigstop (void)
3478 {
3479 struct thread_info *saved_thread;
3480 ptid_t saved_tid;
3481 int wstat;
3482 int ret;
3483
3484 saved_thread = current_thread;
3485 if (saved_thread != NULL)
3486 saved_tid = saved_thread->entry.id;
3487 else
3488 saved_tid = null_ptid; /* avoid bogus unused warning */
3489
3490 if (debug_threads)
3491 debug_printf ("wait_for_sigstop: pulling events\n");
3492
3493 /* Passing NULL_PTID as filter indicates we want all events to be
3494 left pending. Eventually this returns when there are no
3495 unwaited-for children left. */
3496 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3497 &wstat, __WALL);
3498 gdb_assert (ret == -1);
3499
3500 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3501 current_thread = saved_thread;
3502 else
3503 {
3504 if (debug_threads)
3505 debug_printf ("Previously current thread died.\n");
3506
3507 if (non_stop)
3508 {
3509 /* We can't change the current inferior behind GDB's back,
3510 otherwise, a subsequent command may apply to the wrong
3511 process. */
3512 current_thread = NULL;
3513 }
3514 else
3515 {
3516 /* Set a valid thread as current. */
3517 set_desired_thread (0);
3518 }
3519 }
3520 }
3521
3522 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3523 move it out, because we need to report the stop event to GDB. For
3524 example, if the user puts a breakpoint in the jump pad, it's
3525 because she wants to debug it. */
3526
3527 static int
3528 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3529 {
3530 struct thread_info *thread = (struct thread_info *) entry;
3531 struct lwp_info *lwp = get_thread_lwp (thread);
3532
3533 gdb_assert (lwp->suspended == 0);
3534 gdb_assert (lwp->stopped);
3535
3536 /* Allow debugging the jump pad, gdb_collect, etc. */
3537 return (supports_fast_tracepoints ()
3538 && agent_loaded_p ()
3539 && (gdb_breakpoint_here (lwp->stop_pc)
3540 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3541 || thread->last_resume_kind == resume_step)
3542 && linux_fast_tracepoint_collecting (lwp, NULL));
3543 }
3544
3545 static void
3546 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3547 {
3548 struct thread_info *thread = (struct thread_info *) entry;
3549 struct lwp_info *lwp = get_thread_lwp (thread);
3550 int *wstat;
3551
3552 gdb_assert (lwp->suspended == 0);
3553 gdb_assert (lwp->stopped);
3554
3555 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3556
3557 /* Allow debugging the jump pad, gdb_collect, etc. */
3558 if (!gdb_breakpoint_here (lwp->stop_pc)
3559 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3560 && thread->last_resume_kind != resume_step
3561 && maybe_move_out_of_jump_pad (lwp, wstat))
3562 {
3563 if (debug_threads)
3564 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3565 lwpid_of (thread));
3566
3567 if (wstat)
3568 {
3569 lwp->status_pending_p = 0;
3570 enqueue_one_deferred_signal (lwp, wstat);
3571
3572 if (debug_threads)
3573 debug_printf ("Signal %d for LWP %ld deferred "
3574 "(in jump pad)\n",
3575 WSTOPSIG (*wstat), lwpid_of (thread));
3576 }
3577
3578 linux_resume_one_lwp (lwp, 0, 0, NULL);
3579 }
3580 else
3581 lwp->suspended++;
3582 }
3583
3584 static int
3585 lwp_running (struct inferior_list_entry *entry, void *data)
3586 {
3587 struct thread_info *thread = (struct thread_info *) entry;
3588 struct lwp_info *lwp = get_thread_lwp (thread);
3589
3590 if (lwp->dead)
3591 return 0;
3592 if (lwp->stopped)
3593 return 0;
3594 return 1;
3595 }
3596
3597 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3598 If SUSPEND, then also increase the suspend count of every LWP,
3599 except EXCEPT. */
3600
3601 static void
3602 stop_all_lwps (int suspend, struct lwp_info *except)
3603 {
3604 /* Should not be called recursively. */
3605 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3606
3607 if (debug_threads)
3608 {
3609 debug_enter ();
3610 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3611 suspend ? "stop-and-suspend" : "stop",
3612 except != NULL
3613 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3614 : "none");
3615 }
3616
3617 stopping_threads = (suspend
3618 ? STOPPING_AND_SUSPENDING_THREADS
3619 : STOPPING_THREADS);
3620
3621 if (suspend)
3622 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3623 else
3624 find_inferior (&all_threads, send_sigstop_callback, except);
3625 wait_for_sigstop ();
3626 stopping_threads = NOT_STOPPING_THREADS;
3627
3628 if (debug_threads)
3629 {
3630 debug_printf ("stop_all_lwps done, setting stopping_threads "
3631 "back to !stopping\n");
3632 debug_exit ();
3633 }
3634 }
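
/* Editorial summary of the stop-all protocol above: flag
   stopping_threads, SIGSTOP every LWP that is not already stopped
   (optionally bumping suspend counts), then drain waitpid via
   linux_wait_for_event_filtered (minus_one_ptid, null_ptid, ...)
   until no unwaited-for children remain, leaving every event
   pending.  unstop_all_lwps and unsuspend_all_lwps undo the two
   halves.  */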
3635
3636 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3637 SIGNAL is nonzero, give it that signal. */
3638
3639 static void
3640 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3641 int step, int signal, siginfo_t *info)
3642 {
3643 struct thread_info *thread = get_lwp_thread (lwp);
3644 struct thread_info *saved_thread;
3645 int fast_tp_collecting;
3646
3647 if (lwp->stopped == 0)
3648 return;
3649
3650 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3651
3652 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3653
3654 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3655 user used the "jump" command, or "set $pc = foo"). */
3656 if (lwp->stop_pc != get_pc (lwp))
3657 {
3658 /* Collecting 'while-stepping' actions doesn't make sense
3659 anymore. */
3660 release_while_stepping_state_list (thread);
3661 }
3662
3663 /* If we have pending signals or status, and a new signal, enqueue the
3664 signal. Also enqueue the signal if we are waiting to reinsert a
3665 breakpoint; it will be picked up again below. */
3666 if (signal != 0
3667 && (lwp->status_pending_p
3668 || lwp->pending_signals != NULL
3669 || lwp->bp_reinsert != 0
3670 || fast_tp_collecting))
3671 {
3672 struct pending_signals *p_sig;
3673 p_sig = xmalloc (sizeof (*p_sig));
3674 p_sig->prev = lwp->pending_signals;
3675 p_sig->signal = signal;
3676 if (info == NULL)
3677 memset (&p_sig->info, 0, sizeof (siginfo_t));
3678 else
3679 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3680 lwp->pending_signals = p_sig;
3681 }
3682
3683 if (lwp->status_pending_p)
3684 {
3685 if (debug_threads)
3686 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3687 " has pending status\n",
3688 lwpid_of (thread), step ? "step" : "continue", signal,
3689 lwp->stop_expected ? "expected" : "not expected");
3690 return;
3691 }
3692
3693 saved_thread = current_thread;
3694 current_thread = thread;
3695
3696 if (debug_threads)
3697 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3698 lwpid_of (thread), step ? "step" : "continue", signal,
3699 lwp->stop_expected ? "expected" : "not expected");
3700
3701 /* This bit needs some thinking about. If we get a signal that
3702 we must report while a single-step reinsert is still pending,
3703 we often end up resuming the thread. It might be better to
3704 (ew) allow a stack of pending events; then we could be sure that
3705 the reinsert happened right away and not lose any signals.
3706
3707 Making this stack would also shrink the window in which breakpoints are
3708 uninserted (see comment in linux_wait_for_lwp) but not enough for
3709 complete correctness, so it won't solve that problem. It may be
3710 worthwhile just to solve this one, however. */
3711 if (lwp->bp_reinsert != 0)
3712 {
3713 if (debug_threads)
3714 debug_printf (" pending reinsert at 0x%s\n",
3715 paddress (lwp->bp_reinsert));
3716
3717 if (can_hardware_single_step ())
3718 {
3719 if (fast_tp_collecting == 0)
3720 {
3721 if (step == 0)
3722 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3723 if (lwp->suspended)
3724 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3725 lwp->suspended);
3726 }
3727
3728 step = 1;
3729 }
3730
3731 /* Postpone any pending signal. It was enqueued above. */
3732 signal = 0;
3733 }
3734
3735 if (fast_tp_collecting == 1)
3736 {
3737 if (debug_threads)
3738 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3739 " (exit-jump-pad-bkpt)\n",
3740 lwpid_of (thread));
3741
3742 /* Postpone any pending signal. It was enqueued above. */
3743 signal = 0;
3744 }
3745 else if (fast_tp_collecting == 2)
3746 {
3747 if (debug_threads)
3748 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3749 " single-stepping\n",
3750 lwpid_of (thread));
3751
3752 if (can_hardware_single_step ())
3753 step = 1;
3754 else
3755 {
3756 internal_error (__FILE__, __LINE__,
3757 "moving out of jump pad single-stepping"
3758 " not implemented on this target");
3759 }
3760
3761 /* Postpone any pending signal. It was enqueued above. */
3762 signal = 0;
3763 }
3764
3765 /* If we have while-stepping actions in this thread set it stepping.
3766 If we have a signal to deliver, it may or may not be set to
3767 SIG_IGN, we don't know. Assume so, and allow collecting
3768 while-stepping into a signal handler. A possible smart thing to
3769 do would be to set an internal breakpoint at the signal return
3770 address, continue, and carry on catching this while-stepping
3771 action only when that breakpoint is hit. A future
3772 enhancement. */
3773 if (thread->while_stepping != NULL
3774 && can_hardware_single_step ())
3775 {
3776 if (debug_threads)
3777 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3778 lwpid_of (thread));
3779 step = 1;
3780 }
3781
3782 if (the_low_target.get_pc != NULL)
3783 {
3784 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3785
3786 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3787
3788 if (debug_threads)
3789 {
3790 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3791 (long) lwp->stop_pc);
3792 }
3793 }
3794
3795 /* If we have pending signals, consume one unless we are trying to
3796 reinsert a breakpoint or we're trying to finish a fast tracepoint
3797 collect. */
3798 if (lwp->pending_signals != NULL
3799 && lwp->bp_reinsert == 0
3800 && fast_tp_collecting == 0)
3801 {
3802 struct pending_signals **p_sig;
3803
3804 p_sig = &lwp->pending_signals;
3805 while ((*p_sig)->prev != NULL)
3806 p_sig = &(*p_sig)->prev;
3807
3808 signal = (*p_sig)->signal;
3809 if ((*p_sig)->info.si_signo != 0)
3810 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3811 &(*p_sig)->info);
3812
3813 free (*p_sig);
3814 *p_sig = NULL;
3815 }
3816
3817 if (the_low_target.prepare_to_resume != NULL)
3818 the_low_target.prepare_to_resume (lwp);
3819
3820 regcache_invalidate_thread (thread);
3821 errno = 0;
3822 lwp->stepping = step;
3823 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3824 (PTRACE_TYPE_ARG3) 0,
3825 /* Coerce to a uintptr_t first to avoid potential gcc warning
3826 of coercing an 8 byte integer to a 4 byte pointer. */
3827 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3828
3829 current_thread = saved_thread;
3830 if (errno)
3831 perror_with_name ("resuming thread");
3832
3833 /* Successfully resumed. Clear state that no longer makes sense,
3834 and mark the LWP as running. Must not do this before resuming
3835 otherwise if that fails other code will be confused. E.g., we'd
3836 later try to stop the LWP and hang forever waiting for a stop
3837 status. Note that we must not throw after this is cleared,
3838 otherwise handle_zombie_lwp_error would get confused. */
3839 lwp->stopped = 0;
3840 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3841 }
3842
3843 /* Called when we try to resume a stopped LWP and that errors out. If
3844 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
3845 or about to become one), discard the error, clear any pending status
3846 the LWP may have, and return true (we'll collect the exit status
3847 soon enough). Otherwise, return false. */
3848
3849 static int
3850 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3851 {
3852 struct thread_info *thread = get_lwp_thread (lp);
3853
3854 /* If we get an error after resuming the LWP successfully, we'd
3855 confuse !T state for the LWP being gone. */
3856 gdb_assert (lp->stopped);
3857
3858 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3859 because even if ptrace failed with ESRCH, the tracee may be "not
3860 yet fully dead", but already refusing ptrace requests. In that
3861 case the tracee has 'R (Running)' state for a little bit
3862 (observed in Linux 3.18). See also the note on ESRCH in the
3863 ptrace(2) man page. Instead, check whether the LWP has any state
3864 other than ptrace-stopped. */
3865
3866 /* Don't assume anything if /proc/PID/status can't be read. */
3867 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3868 {
3869 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3870 lp->status_pending_p = 0;
3871 return 1;
3872 }
3873 return 0;
3874 }
3875
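/* Illustrative sketch, not part of gdbserver: roughly the check that
a /proc-based helper like linux_proc_pid_is_trace_stopped_nowarn
(which lives in nat/linux-procfs.c) has to perform -- scan
/proc/PID/status for the "State:" line and test whether the state
letter is 't' (tracing stop). The function name and the minimal
error handling here are assumptions. */
#if 0
static int
example_pid_is_trace_stopped (int pid)
{
  char path[64], line[128];
  FILE *f;
  int stopped = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;  /* Can't tell; assume not trace-stopped. */

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        /* E.g. "State:\tt (tracing stop)". */
        const char *p = line + 6;
        while (*p == ' ' || *p == '\t')
          p++;
        stopped = (*p == 't');
        break;
      }

  fclose (f);
  return stopped;
}
#endif
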
3876 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3877 disappears while we try to resume it. */
3878
3879 static void
3880 linux_resume_one_lwp (struct lwp_info *lwp,
3881 int step, int signal, siginfo_t *info)
3882 {
3883 TRY
3884 {
3885 linux_resume_one_lwp_throw (lwp, step, signal, info);
3886 }
3887 CATCH (ex, RETURN_MASK_ERROR)
3888 {
3889 if (!check_ptrace_stopped_lwp_gone (lwp))
3890 throw_exception (ex);
3891 }
3892 END_CATCH
3893 }
3894
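/* For reference: TRY/CATCH/END_CATCH above are gdb's C-level
exception macros (declared in common/common-exceptions.h). The
general shape of the pattern is sketched below; the body comments
are illustrative. */
#if 0
  TRY
    {
      /* Code that may throw via error () / throw_exception (). */
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Runs only for error-class exceptions; rethrow anything we
         cannot handle here. */
      throw_exception (ex);
    }
  END_CATCH
#endif
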
3895 struct thread_resume_array
3896 {
3897 struct thread_resume *resume;
3898 size_t n;
3899 };
3900
3901 /* This function is called once per thread via find_inferior.
3902 ARG is a pointer to a thread_resume_array struct.
3903 We look up the thread specified by ENTRY in ARG, and mark the thread
3904 with a pointer to the appropriate resume request.
3905
3906 This algorithm is O(threads * resume elements), but the number of
3907 resume elements is small (and will remain small at least until GDB
3908 supports thread suspension). */
3909
3910 static int
3911 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3912 {
3913 struct thread_info *thread = (struct thread_info *) entry;
3914 struct lwp_info *lwp = get_thread_lwp (thread);
3915 int ndx;
3916 struct thread_resume_array *r;
3917
3918 r = arg;
3919
3920 for (ndx = 0; ndx < r->n; ndx++)
3921 {
3922 ptid_t ptid = r->resume[ndx].thread;
3923 if (ptid_equal (ptid, minus_one_ptid)
3924 || ptid_equal (ptid, entry->id)
3925 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3926 of PID'. */
3927 || (ptid_get_pid (ptid) == pid_of (thread)
3928 && (ptid_is_pid (ptid)
3929 || ptid_get_lwp (ptid) == -1)))
3930 {
3931 if (r->resume[ndx].kind == resume_stop
3932 && thread->last_resume_kind == resume_stop)
3933 {
3934 if (debug_threads)
3935 debug_printf ("already %s LWP %ld at GDB's request\n",
3936 (thread->last_status.kind
3937 == TARGET_WAITKIND_STOPPED)
3938 ? "stopped"
3939 : "stopping",
3940 lwpid_of (thread));
3941
3942 continue;
3943 }
3944
3945 lwp->resume = &r->resume[ndx];
3946 thread->last_resume_kind = lwp->resume->kind;
3947
3948 lwp->step_range_start = lwp->resume->step_range_start;
3949 lwp->step_range_end = lwp->resume->step_range_end;
3950
3951 /* If we had a deferred signal to report, dequeue one now.
3952 This can happen if LWP gets more than one signal while
3953 trying to get out of a jump pad. */
3954 if (lwp->stopped
3955 && !lwp->status_pending_p
3956 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3957 {
3958 lwp->status_pending_p = 1;
3959
3960 if (debug_threads)
3961 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3962 "leaving status pending.\n",
3963 WSTOPSIG (lwp->status_pending),
3964 lwpid_of (thread));
3965 }
3966
3967 return 0;
3968 }
3969 }
3970
3971 /* No resume action for this thread. */
3972 lwp->resume = NULL;
3973
3974 return 0;
3975 }
3976
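/* The ptid wildcard test above, isolated for clarity (an
illustrative sketch; the function name is made up, while the ptid
accessors are the real ones used in this file). A resume request
matches a thread when it is minus_one_ptid (all threads), an exact
match, or a process-wide wildcard in either the bare 'pPID' or the
'pPID.-1' form. */
#if 0
static int
example_resume_request_matches (ptid_t request, ptid_t thread_ptid)
{
  return (ptid_equal (request, minus_one_ptid)
          || ptid_equal (request, thread_ptid)
          || (ptid_get_pid (request) == ptid_get_pid (thread_ptid)
              && (ptid_is_pid (request)
                  || ptid_get_lwp (request) == -1)));
}
#endif
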
3977 /* find_inferior callback for linux_resume.
3978 Set *FLAG_P if this lwp has an interesting status pending. */
3979
3980 static int
3981 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3982 {
3983 struct thread_info *thread = (struct thread_info *) entry;
3984 struct lwp_info *lwp = get_thread_lwp (thread);
3985
3986 /* LWPs which will not be resumed are not interesting, because
3987 we might not wait for them next time through linux_wait. */
3988 if (lwp->resume == NULL)
3989 return 0;
3990
3991 if (thread_still_has_status_pending_p (thread))
3992 * (int *) flag_p = 1;
3993
3994 return 0;
3995 }
3996
3997 /* Return 1 if this lwp that GDB wants running is stopped at an
3998 internal breakpoint that we need to step over. It assumes that any
3999 required STOP_PC adjustment has already been propagated to the
4000 inferior's regcache. */
4001
4002 static int
4003 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4004 {
4005 struct thread_info *thread = (struct thread_info *) entry;
4006 struct lwp_info *lwp = get_thread_lwp (thread);
4007 struct thread_info *saved_thread;
4008 CORE_ADDR pc;
4009
4010 /* LWPs which will not be resumed are not interesting, because we
4011 might not wait for them next time through linux_wait. */
4012
4013 if (!lwp->stopped)
4014 {
4015 if (debug_threads)
4016 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4017 lwpid_of (thread));
4018 return 0;
4019 }
4020
4021 if (thread->last_resume_kind == resume_stop)
4022 {
4023 if (debug_threads)
4024 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4025 " stopped\n",
4026 lwpid_of (thread));
4027 return 0;
4028 }
4029
4030 gdb_assert (lwp->suspended >= 0);
4031
4032 if (lwp->suspended)
4033 {
4034 if (debug_threads)
4035 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4036 lwpid_of (thread));
4037 return 0;
4038 }
4039
4040 if (!lwp->need_step_over)
4041 {
4042 if (debug_threads)
4043 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4044 }
4045
4046 if (lwp->status_pending_p)
4047 {
4048 if (debug_threads)
4049 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4050 " status.\n",
4051 lwpid_of (thread));
4052 return 0;
4053 }
4054
4055 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4056 or we have. */
4057 pc = get_pc (lwp);
4058
4059 /* If the PC has changed since we stopped, then don't do anything,
4060 and let the breakpoint/tracepoint be hit. This happens if, for
4061 instance, GDB handled the decr_pc_after_break subtraction itself,
4062 GDB is OOL stepping this thread, or the user has issued a "jump"
4063 command, or poked thread's registers herself. */
4064 if (pc != lwp->stop_pc)
4065 {
4066 if (debug_threads)
4067 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4068 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4069 lwpid_of (thread),
4070 paddress (lwp->stop_pc), paddress (pc));
4071
4072 lwp->need_step_over = 0;
4073 return 0;
4074 }
4075
4076 saved_thread = current_thread;
4077 current_thread = thread;
4078
4079 /* We can only step over breakpoints we know about. */
4080 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4081 {
4082 /* Don't step over a breakpoint that GDB expects to hit
4083 though. If the condition is being evaluated on the target's side
4084 and it evaluates to false, step over this breakpoint as well. */
4085 if (gdb_breakpoint_here (pc)
4086 && gdb_condition_true_at_breakpoint (pc)
4087 && gdb_no_commands_at_breakpoint (pc))
4088 {
4089 if (debug_threads)
4090 debug_printf ("Need step over [LWP %ld]? yes, but found"
4091 " GDB breakpoint at 0x%s; skipping step over\n",
4092 lwpid_of (thread), paddress (pc));
4093
4094 current_thread = saved_thread;
4095 return 0;
4096 }
4097 else
4098 {
4099 if (debug_threads)
4100 debug_printf ("Need step over [LWP %ld]? yes, "
4101 "found breakpoint at 0x%s\n",
4102 lwpid_of (thread), paddress (pc));
4103
4104 /* We've found an lwp that needs stepping over --- return 1 so
4105 that find_inferior stops looking. */
4106 current_thread = saved_thread;
4107
4108 /* If the step over is cancelled, this is set again. */
4109 lwp->need_step_over = 0;
4110 return 1;
4111 }
4112 }
4113
4114 current_thread = saved_thread;
4115
4116 if (debug_threads)
4117 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4118 " at 0x%s\n",
4119 lwpid_of (thread), paddress (pc));
4120
4121 return 0;
4122 }
4123
4124 /* Start a step-over operation on LWP. When LWP stopped at a
4125 breakpoint, to make progress, we need to get the breakpoint out
4126 of the way. If we let other threads run while we do that, they may
4127 pass by the breakpoint location and miss hitting it. To avoid
4128 that, a step-over momentarily stops all threads while LWP is
4129 single-stepped while the breakpoint is temporarily uninserted from
4130 the inferior. When the single-step finishes, we reinsert the
4131 breakpoint, and let all threads that are supposed to be running,
4132 run again.
4133
4134 On targets that don't support hardware single-step, we don't
4135 currently support full software single-stepping. Instead, we only
4136 support stepping over the thread event breakpoint, by asking the
4137 low target where to place a reinsert breakpoint. Since this
4138 routine assumes the breakpoint being stepped over is a thread event
4139 breakpoint, it usually assumes the return address of the current
4140 function is a good enough place to set the reinsert breakpoint. */
4141
4142 static int
4143 start_step_over (struct lwp_info *lwp)
4144 {
4145 struct thread_info *thread = get_lwp_thread (lwp);
4146 struct thread_info *saved_thread;
4147 CORE_ADDR pc;
4148 int step;
4149
4150 if (debug_threads)
4151 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4152 lwpid_of (thread));
4153
4154 stop_all_lwps (1, lwp);
4155 gdb_assert (lwp->suspended == 0);
4156
4157 if (debug_threads)
4158 debug_printf ("Done stopping all threads for step-over.\n");
4159
4160 /* Note, we should always reach here with an already adjusted PC,
4161 either by GDB (if we're resuming due to GDB's request), or by our
4162 caller, if we just finished handling an internal breakpoint GDB
4163 shouldn't care about. */
4164 pc = get_pc (lwp);
4165
4166 saved_thread = current_thread;
4167 current_thread = thread;
4168
4169 lwp->bp_reinsert = pc;
4170 uninsert_breakpoints_at (pc);
4171 uninsert_fast_tracepoint_jumps_at (pc);
4172
4173 if (can_hardware_single_step ())
4174 {
4175 step = 1;
4176 }
4177 else
4178 {
4179 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4180 set_reinsert_breakpoint (raddr);
4181 step = 0;
4182 }
4183
4184 current_thread = saved_thread;
4185
4186 linux_resume_one_lwp (lwp, step, 0, NULL);
4187
4188 /* Require next event from this LWP. */
4189 step_over_bkpt = thread->entry.id;
4190 return 1;
4191 }
4192
4193 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4194 start_step_over, if still there, and delete any reinsert
4195 breakpoints we've set, on non hardware single-step targets. */
4196
4197 static int
4198 finish_step_over (struct lwp_info *lwp)
4199 {
4200 if (lwp->bp_reinsert != 0)
4201 {
4202 if (debug_threads)
4203 debug_printf ("Finished step over.\n");
4204
4205 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4206 may be no breakpoint to reinsert there by now. */
4207 reinsert_breakpoints_at (lwp->bp_reinsert);
4208 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4209
4210 lwp->bp_reinsert = 0;
4211
4212 /* Delete any software-single-step reinsert breakpoints. No
4213 longer needed. We don't have to worry about other threads
4214 hitting this trap, and later not being able to explain it,
4215 because we were stepping over a breakpoint, and we hold all
4216 threads but LWP stopped while doing that. */
4217 if (!can_hardware_single_step ())
4218 delete_reinsert_breakpoints ();
4219
4220 step_over_bkpt = null_ptid;
4221 return 1;
4222 }
4223 else
4224 return 0;
4225 }
4226
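/* The step-over protocol implemented by start_step_over and
finish_step_over above, reduced to its essential ordering (a recap
sketch, not a second implementation; the wait in the middle is
driven by the main event loop): */
#if 0
  stop_all_lwps (1, lwp);                  /* Freeze all other threads. */
  lwp->bp_reinsert = pc;                   /* Remember the uninserted PC. */
  uninsert_breakpoints_at (pc);            /* Expose the original insn. */
  linux_resume_one_lwp (lwp, 1, 0, NULL);  /* Hardware single-step. */
  /* ... the event loop waits for the step to complete, then calls
     finish_step_over, which effectively does: */
  reinsert_breakpoints_at (lwp->bp_reinsert);
  lwp->bp_reinsert = 0;
  /* ... after which the caller lets the other threads run again via
     unstop_all_lwps. */
#endif
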
4227 /* This function is called once per thread. We check the thread's resume
4228 request, which will tell us whether to resume, step, or leave the thread
4229 stopped; and what signal, if any, it should be sent.
4230
4231 For threads which we aren't explicitly told otherwise, we preserve
4232 the stepping flag; this is used for stepping over gdbserver-placed
4233 breakpoints.
4234
4235 If pending_flags was set in any thread, we queue any needed
4236 signals, since we won't actually resume. We already have a pending
4237 event to report, so we don't need to preserve any step requests;
4238 they should be re-issued if necessary. */
4239
4240 static int
4241 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4242 {
4243 struct thread_info *thread = (struct thread_info *) entry;
4244 struct lwp_info *lwp = get_thread_lwp (thread);
4245 int step;
4246 int leave_all_stopped = * (int *) arg;
4247 int leave_pending;
4248
4249 if (lwp->resume == NULL)
4250 return 0;
4251
4252 if (lwp->resume->kind == resume_stop)
4253 {
4254 if (debug_threads)
4255 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4256
4257 if (!lwp->stopped)
4258 {
4259 if (debug_threads)
4260 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4261
4262 /* Stop the thread, and wait for the event asynchronously,
4263 through the event loop. */
4264 send_sigstop (lwp);
4265 }
4266 else
4267 {
4268 if (debug_threads)
4269 debug_printf ("already stopped LWP %ld\n",
4270 lwpid_of (thread));
4271
4272 /* The LWP may have been stopped in an internal event that
4273 was not meant to be notified back to GDB (e.g., gdbserver
4274 breakpoint), so we should be reporting a stop event in
4275 this case too. */
4276
4277 /* If the thread already has a pending SIGSTOP, this is a
4278 no-op. Otherwise, something later will presumably resume
4279 the thread and this will cause it to cancel any pending
4280 operation, due to last_resume_kind == resume_stop. If
4281 the thread already has a pending status to report, we
4282 will still report it the next time we wait - see
4283 status_pending_p_callback. */
4284
4285 /* If we already have a pending signal to report, then
4286 there's no need to queue a SIGSTOP, as this means we're
4287 midway through moving the LWP out of the jumppad, and we
4288 will report the pending signal as soon as that is
4289 finished. */
4290 if (lwp->pending_signals_to_report == NULL)
4291 send_sigstop (lwp);
4292 }
4293
4294 /* For stop requests, we're done. */
4295 lwp->resume = NULL;
4296 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4297 return 0;
4298 }
4299
4300 /* If this thread which is about to be resumed has a pending status,
4301 then don't resume any threads - we can just report the pending
4302 status. Make sure to queue any signals that would otherwise be
4303 sent. In all-stop mode, we make this decision based on whether *any*
4304 thread has a pending status. If there's a thread that needs the
4305 step-over-breakpoint dance, then don't resume any other thread
4306 but that particular one. */
4307 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4308
4309 if (!leave_pending)
4310 {
4311 if (debug_threads)
4312 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4313
4314 step = (lwp->resume->kind == resume_step);
4315 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4316 }
4317 else
4318 {
4319 if (debug_threads)
4320 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4321
4322 /* If we have a new signal, enqueue the signal. */
4323 if (lwp->resume->sig != 0)
4324 {
4325 struct pending_signals *p_sig;
4326 p_sig = xmalloc (sizeof (*p_sig));
4327 p_sig->prev = lwp->pending_signals;
4328 p_sig->signal = lwp->resume->sig;
4329 memset (&p_sig->info, 0, sizeof (siginfo_t));
4330
4331 /* If this is the same signal we were previously stopped by,
4332 make sure to queue its siginfo. We can ignore the return
4333 value of ptrace; if it fails, we'll skip
4334 PTRACE_SETSIGINFO. */
4335 if (WIFSTOPPED (lwp->last_status)
4336 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4337 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4338 &p_sig->info);
4339
4340 lwp->pending_signals = p_sig;
4341 }
4342 }
4343
4344 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4345 lwp->resume = NULL;
4346 return 0;
4347 }
4348
4349 static void
4350 linux_resume (struct thread_resume *resume_info, size_t n)
4351 {
4352 struct thread_resume_array array = { resume_info, n };
4353 struct thread_info *need_step_over = NULL;
4354 int any_pending;
4355 int leave_all_stopped;
4356
4357 if (debug_threads)
4358 {
4359 debug_enter ();
4360 debug_printf ("linux_resume:\n");
4361 }
4362
4363 find_inferior (&all_threads, linux_set_resume_request, &array);
4364
4365 /* If there is a thread which would otherwise be resumed, which has
4366 a pending status, then don't resume any threads - we can just
4367 report the pending status. Make sure to queue any signals that
4368 would otherwise be sent. In non-stop mode, we'll apply this
4369 logic to each thread individually. We consume all pending events
4370 before considering starting a step-over (in all-stop). */
4371 any_pending = 0;
4372 if (!non_stop)
4373 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4374
4375 /* If there is a thread which would otherwise be resumed, which is
4376 stopped at a breakpoint that needs stepping over, then don't
4377 resume any threads - have it step over the breakpoint with all
4378 other threads stopped, then resume all threads again. Make sure
4379 to queue any signals that would otherwise be delivered or
4380 queued. */
4381 if (!any_pending && supports_breakpoints ())
4382 need_step_over
4383 = (struct thread_info *) find_inferior (&all_threads,
4384 need_step_over_p, NULL);
4385
4386 leave_all_stopped = (need_step_over != NULL || any_pending);
4387
4388 if (debug_threads)
4389 {
4390 if (need_step_over != NULL)
4391 debug_printf ("Not resuming all, need step over\n");
4392 else if (any_pending)
4393 debug_printf ("Not resuming, all-stop and found "
4394 "an LWP with pending status\n");
4395 else
4396 debug_printf ("Resuming, no pending status or step over needed\n");
4397 }
4398
4399 /* Even if we're leaving threads stopped, queue all signals we'd
4400 otherwise deliver. */
4401 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4402
4403 if (need_step_over)
4404 start_step_over (get_thread_lwp (need_step_over));
4405
4406 if (debug_threads)
4407 {
4408 debug_printf ("linux_resume done\n");
4409 debug_exit ();
4410 }
4411 }
4412
4413 /* This function is called once per thread. We check the thread's
4414 last resume request, which will tell us whether to resume, step, or
4415 leave the thread stopped. Any signal the client requested to be
4416 delivered has already been enqueued at this point.
4417
4418 If any thread that GDB wants running is stopped at an internal
4419 breakpoint that needs stepping over, we start a step-over operation
4420 on that particular thread, and leave all others stopped. */
4421
4422 static int
4423 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4424 {
4425 struct thread_info *thread = (struct thread_info *) entry;
4426 struct lwp_info *lwp = get_thread_lwp (thread);
4427 int step;
4428
4429 if (lwp == except)
4430 return 0;
4431
4432 if (debug_threads)
4433 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4434
4435 if (!lwp->stopped)
4436 {
4437 if (debug_threads)
4438 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4439 return 0;
4440 }
4441
4442 if (thread->last_resume_kind == resume_stop
4443 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4444 {
4445 if (debug_threads)
4446 debug_printf (" client wants LWP %ld to remain stopped\n",
4447 lwpid_of (thread));
4448 return 0;
4449 }
4450
4451 if (lwp->status_pending_p)
4452 {
4453 if (debug_threads)
4454 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4455 lwpid_of (thread));
4456 return 0;
4457 }
4458
4459 gdb_assert (lwp->suspended >= 0);
4460
4461 if (lwp->suspended)
4462 {
4463 if (debug_threads)
4464 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4465 return 0;
4466 }
4467
4468 if (thread->last_resume_kind == resume_stop
4469 && lwp->pending_signals_to_report == NULL
4470 && lwp->collecting_fast_tracepoint == 0)
4471 {
4472 /* We haven't reported this LWP as stopped yet (otherwise, the
4473 last_status.kind check above would catch it, and we wouldn't
4474 reach here). This LWP may have been momentarily paused by a
4475 stop_all_lwps call while handling, for example, another LWP's
4476 step-over. In that case, the pending expected SIGSTOP signal
4477 that was queued at vCont;t handling time will have already
4478 been consumed by wait_for_sigstop, and so we need to requeue
4479 another one here. Note that if the LWP already has a SIGSTOP
4480 pending, this is a no-op. */
4481
4482 if (debug_threads)
4483 debug_printf ("Client wants LWP %ld to stop. "
4484 "Making sure it has a SIGSTOP pending\n",
4485 lwpid_of (thread));
4486
4487 send_sigstop (lwp);
4488 }
4489
4490 step = thread->last_resume_kind == resume_step;
4491 linux_resume_one_lwp (lwp, step, 0, NULL);
4492 return 0;
4493 }
4494
4495 static int
4496 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4497 {
4498 struct thread_info *thread = (struct thread_info *) entry;
4499 struct lwp_info *lwp = get_thread_lwp (thread);
4500
4501 if (lwp == except)
4502 return 0;
4503
4504 lwp->suspended--;
4505 gdb_assert (lwp->suspended >= 0);
4506
4507 return proceed_one_lwp (entry, except);
4508 }
4509
4510 /* When we finish a step-over, set threads running again. If there's
4511 another thread that may need a step-over, now's the time to start
4512 it. Eventually, we'll move all threads past their breakpoints. */
4513
4514 static void
4515 proceed_all_lwps (void)
4516 {
4517 struct thread_info *need_step_over;
4518
4519 /* If there is a thread which would otherwise be resumed, which is
4520 stopped at a breakpoint that needs stepping over, then don't
4521 resume any threads - have it step over the breakpoint with all
4522 other threads stopped, then resume all threads again. */
4523
4524 if (supports_breakpoints ())
4525 {
4526 need_step_over
4527 = (struct thread_info *) find_inferior (&all_threads,
4528 need_step_over_p, NULL);
4529
4530 if (need_step_over != NULL)
4531 {
4532 if (debug_threads)
4533 debug_printf ("proceed_all_lwps: found "
4534 "thread %ld needing a step-over\n",
4535 lwpid_of (need_step_over));
4536
4537 start_step_over (get_thread_lwp (need_step_over));
4538 return;
4539 }
4540 }
4541
4542 if (debug_threads)
4543 debug_printf ("Proceeding, no step-over needed\n");
4544
4545 find_inferior (&all_threads, proceed_one_lwp, NULL);
4546 }
4547
4548 /* Stopped LWPs that the client wanted to be running, that don't have
4549 pending statuses, are set to run again, except for EXCEPT, if not
4550 NULL. This undoes a stop_all_lwps call. */
4551
4552 static void
4553 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4554 {
4555 if (debug_threads)
4556 {
4557 debug_enter ();
4558 if (except)
4559 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4560 lwpid_of (get_lwp_thread (except)));
4561 else
4562 debug_printf ("unstopping all lwps\n");
4563 }
4564
4565 if (unsuspend)
4566 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4567 else
4568 find_inferior (&all_threads, proceed_one_lwp, except);
4569
4570 if (debug_threads)
4571 {
4572 debug_printf ("unstop_all_lwps done\n");
4573 debug_exit ();
4574 }
4575 }
4576
4577
4578 #ifdef HAVE_LINUX_REGSETS
4579
4580 #define use_linux_regsets 1
4581
4582 /* Returns true if REGSET has been disabled. */
4583
4584 static int
4585 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4586 {
4587 return (info->disabled_regsets != NULL
4588 && info->disabled_regsets[regset - info->regsets]);
4589 }
4590
4591 /* Disable REGSET. */
4592
4593 static void
4594 disable_regset (struct regsets_info *info, struct regset_info *regset)
4595 {
4596 int dr_offset;
4597
4598 dr_offset = regset - info->regsets;
4599 if (info->disabled_regsets == NULL)
4600 info->disabled_regsets = xcalloc (1, info->num_regsets);
4601 info->disabled_regsets[dr_offset] = 1;
4602 }
4603
4604 static int
4605 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4606 struct regcache *regcache)
4607 {
4608 struct regset_info *regset;
4609 int saw_general_regs = 0;
4610 int pid;
4611 struct iovec iov;
4612
4613 pid = lwpid_of (current_thread);
4614 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4615 {
4616 void *buf, *data;
4617 int nt_type, res;
4618
4619 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4620 continue;
4621
4622 buf = xmalloc (regset->size);
4623
4624 nt_type = regset->nt_type;
4625 if (nt_type)
4626 {
4627 iov.iov_base = buf;
4628 iov.iov_len = regset->size;
4629 data = (void *) &iov;
4630 }
4631 else
4632 data = buf;
4633
4634 #ifndef __sparc__
4635 res = ptrace (regset->get_request, pid,
4636 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4637 #else
4638 res = ptrace (regset->get_request, pid, data, nt_type);
4639 #endif
4640 if (res < 0)
4641 {
4642 if (errno == EIO)
4643 {
4644 /* If we get EIO on a regset, do not try it again for
4645 this process mode. */
4646 disable_regset (regsets_info, regset);
4647 }
4648 else if (errno == ENODATA)
4649 {
4650 /* ENODATA may be returned if the regset is currently
4651 not "active". This can happen in normal operation,
4652 so suppress the warning in this case. */
4653 }
4654 else
4655 {
4656 char s[256];
4657 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4658 pid);
4659 perror (s);
4660 }
4661 }
4662 else
4663 {
4664 if (regset->type == GENERAL_REGS)
4665 saw_general_regs = 1;
4666 regset->store_function (regcache, buf);
4667 }
4668 free (buf);
4669 }
4670 if (saw_general_regs)
4671 return 0;
4672 else
4673 return 1;
4674 }
4675
4676 static int
4677 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4678 struct regcache *regcache)
4679 {
4680 struct regset_info *regset;
4681 int saw_general_regs = 0;
4682 int pid;
4683 struct iovec iov;
4684
4685 pid = lwpid_of (current_thread);
4686 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4687 {
4688 void *buf, *data;
4689 int nt_type, res;
4690
4691 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4692 || regset->fill_function == NULL)
4693 continue;
4694
4695 buf = xmalloc (regset->size);
4696
4697 /* First fill the buffer with the current register set contents,
4698 in case there are any items in the kernel's regset that are
4699 not in gdbserver's regcache. */
4700
4701 nt_type = regset->nt_type;
4702 if (nt_type)
4703 {
4704 iov.iov_base = buf;
4705 iov.iov_len = regset->size;
4706 data = (void *) &iov;
4707 }
4708 else
4709 data = buf;
4710
4711 #ifndef __sparc__
4712 res = ptrace (regset->get_request, pid,
4713 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4714 #else
4715 res = ptrace (regset->get_request, pid, data, nt_type);
4716 #endif
4717
4718 if (res == 0)
4719 {
4720 /* Then overlay our cached registers on that. */
4721 regset->fill_function (regcache, buf);
4722
4723 /* Only now do we write the register set. */
4724 #ifndef __sparc__
4725 res = ptrace (regset->set_request, pid,
4726 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4727 #else
4728 res = ptrace (regset->set_request, pid, data, nt_type);
4729 #endif
4730 }
4731
4732 if (res < 0)
4733 {
4734 if (errno == EIO)
4735 {
4736 /* If we get EIO on a regset, do not try it again for
4737 this process mode. */
4738 disable_regset (regsets_info, regset);
4739 }
4740 else if (errno == ESRCH)
4741 {
4742 /* At this point, ESRCH should mean the process is
4743 already gone, in which case we simply ignore attempts
4744 to change its registers. See also the related
4745 comment in linux_resume_one_lwp. */
4746 free (buf);
4747 return 0;
4748 }
4749 else
4750 {
4751 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4752 }
4753 }
4754 else if (regset->type == GENERAL_REGS)
4755 saw_general_regs = 1;
4756 free (buf);
4757 }
4758 if (saw_general_regs)
4759 return 0;
4760 else
4761 return 1;
4762 }
4763
4764 #else /* !HAVE_LINUX_REGSETS */
4765
4766 #define use_linux_regsets 0
4767 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4768 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4769
4770 #endif
4771
4772 /* Return 1 if register REGNO is supported by one of the regset ptrace
4773 calls or 0 if it has to be transferred individually. */
4774
4775 static int
4776 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4777 {
4778 unsigned char mask = 1 << (regno % 8);
4779 size_t index = regno / 8;
4780
4781 return (use_linux_regsets
4782 && (regs_info->regset_bitmap == NULL
4783 || (regs_info->regset_bitmap[index] & mask) != 0));
4784 }
4785
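/* Worked example of the bitmap test above: for regno == 10, index
is 10 / 8 == 1 and mask is 1 << (10 % 8) == 0x04, so the check reads
bit 2 of regset_bitmap[1]. A NULL bitmap means every register is
covered by some regset. */
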
4786 #ifdef HAVE_LINUX_USRREGS
4787
4788 int
4789 register_addr (const struct usrregs_info *usrregs, int regnum)
4790 {
4791 int addr;
4792
4793 if (regnum < 0 || regnum >= usrregs->num_regs)
4794 error ("Invalid register number %d.", regnum);
4795
4796 addr = usrregs->regmap[regnum];
4797
4798 return addr;
4799 }
4800
4801 /* Fetch one register. */
4802 static void
4803 fetch_register (const struct usrregs_info *usrregs,
4804 struct regcache *regcache, int regno)
4805 {
4806 CORE_ADDR regaddr;
4807 int i, size;
4808 char *buf;
4809 int pid;
4810
4811 if (regno >= usrregs->num_regs)
4812 return;
4813 if ((*the_low_target.cannot_fetch_register) (regno))
4814 return;
4815
4816 regaddr = register_addr (usrregs, regno);
4817 if (regaddr == -1)
4818 return;
4819
4820 size = ((register_size (regcache->tdesc, regno)
4821 + sizeof (PTRACE_XFER_TYPE) - 1)
4822 & -sizeof (PTRACE_XFER_TYPE));
4823 buf = alloca (size);
4824
4825 pid = lwpid_of (current_thread);
4826 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4827 {
4828 errno = 0;
4829 *(PTRACE_XFER_TYPE *) (buf + i) =
4830 ptrace (PTRACE_PEEKUSER, pid,
4831 /* Coerce to a uintptr_t first to avoid potential gcc warning
4832 of coercing an 8 byte integer to a 4 byte pointer. */
4833 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4834 regaddr += sizeof (PTRACE_XFER_TYPE);
4835 if (errno != 0)
4836 error ("reading register %d: %s", regno, strerror (errno));
4837 }
4838
4839 if (the_low_target.supply_ptrace_register)
4840 the_low_target.supply_ptrace_register (regcache, regno, buf);
4841 else
4842 supply_register (regcache, regno, buf);
4843 }
4844
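/* The size computation above is the standard round-up idiom:
(n + m - 1) & -m rounds n up to a multiple of m when m is a power of
two. With sizeof (PTRACE_XFER_TYPE) == 8 (typical on 64-bit hosts),
a 4-byte register rounds up to 8 and a 16-byte one stays 16.
Sanity check, illustrative only: */
#if 0
  assert (((4 + 8 - 1) & -8) == 8);
  assert (((16 + 8 - 1) & -8) == 16);
#endif
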
4845 /* Store one register. */
4846 static void
4847 store_register (const struct usrregs_info *usrregs,
4848 struct regcache *regcache, int regno)
4849 {
4850 CORE_ADDR regaddr;
4851 int i, size;
4852 char *buf;
4853 int pid;
4854
4855 if (regno >= usrregs->num_regs)
4856 return;
4857 if ((*the_low_target.cannot_store_register) (regno))
4858 return;
4859
4860 regaddr = register_addr (usrregs, regno);
4861 if (regaddr == -1)
4862 return;
4863
4864 size = ((register_size (regcache->tdesc, regno)
4865 + sizeof (PTRACE_XFER_TYPE) - 1)
4866 & -sizeof (PTRACE_XFER_TYPE));
4867 buf = alloca (size);
4868 memset (buf, 0, size);
4869
4870 if (the_low_target.collect_ptrace_register)
4871 the_low_target.collect_ptrace_register (regcache, regno, buf);
4872 else
4873 collect_register (regcache, regno, buf);
4874
4875 pid = lwpid_of (current_thread);
4876 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4877 {
4878 errno = 0;
4879 ptrace (PTRACE_POKEUSER, pid,
4880 /* Coerce to a uintptr_t first to avoid potential gcc warning
4881 about coercing an 8 byte integer to a 4 byte pointer. */
4882 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4883 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4884 if (errno != 0)
4885 {
4886 /* At this point, ESRCH should mean the process is
4887 already gone, in which case we simply ignore attempts
4888 to change its registers. See also the related
4889 comment in linux_resume_one_lwp. */
4890 if (errno == ESRCH)
4891 return;
4892
4893 if ((*the_low_target.cannot_store_register) (regno) == 0)
4894 error ("writing register %d: %s", regno, strerror (errno));
4895 }
4896 regaddr += sizeof (PTRACE_XFER_TYPE);
4897 }
4898 }
4899
4900 /* Fetch all registers, or just one, from the child process.
4901 If REGNO is -1, do this for all registers, skipping any that are
4902 assumed to have been retrieved by regsets_fetch_inferior_registers,
4903 unless ALL is non-zero.
4904 Otherwise, REGNO specifies which register (so we can save time). */
4905 static void
4906 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4907 struct regcache *regcache, int regno, int all)
4908 {
4909 struct usrregs_info *usr = regs_info->usrregs;
4910
4911 if (regno == -1)
4912 {
4913 for (regno = 0; regno < usr->num_regs; regno++)
4914 if (all || !linux_register_in_regsets (regs_info, regno))
4915 fetch_register (usr, regcache, regno);
4916 }
4917 else
4918 fetch_register (usr, regcache, regno);
4919 }
4920
4921 /* Store our register values back into the inferior.
4922 If REGNO is -1, do this for all registers, skipping any that are
4923 assumed to have been saved by regsets_store_inferior_registers,
4924 unless ALL is non-zero.
4925 Otherwise, REGNO specifies which register (so we can save time). */
4926 static void
4927 usr_store_inferior_registers (const struct regs_info *regs_info,
4928 struct regcache *regcache, int regno, int all)
4929 {
4930 struct usrregs_info *usr = regs_info->usrregs;
4931
4932 if (regno == -1)
4933 {
4934 for (regno = 0; regno < usr->num_regs; regno++)
4935 if (all || !linux_register_in_regsets (regs_info, regno))
4936 store_register (usr, regcache, regno);
4937 }
4938 else
4939 store_register (usr, regcache, regno);
4940 }
4941
4942 #else /* !HAVE_LINUX_USRREGS */
4943
4944 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4945 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4946
4947 #endif
4948
4949
4950 void
4951 linux_fetch_registers (struct regcache *regcache, int regno)
4952 {
4953 int use_regsets;
4954 int all = 0;
4955 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4956
4957 if (regno == -1)
4958 {
4959 if (the_low_target.fetch_register != NULL
4960 && regs_info->usrregs != NULL)
4961 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4962 (*the_low_target.fetch_register) (regcache, regno);
4963
4964 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4965 if (regs_info->usrregs != NULL)
4966 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4967 }
4968 else
4969 {
4970 if (the_low_target.fetch_register != NULL
4971 && (*the_low_target.fetch_register) (regcache, regno))
4972 return;
4973
4974 use_regsets = linux_register_in_regsets (regs_info, regno);
4975 if (use_regsets)
4976 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4977 regcache);
4978 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4979 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4980 }
4981 }
4982
4983 void
4984 linux_store_registers (struct regcache *regcache, int regno)
4985 {
4986 int use_regsets;
4987 int all = 0;
4988 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4989
4990 if (regno == -1)
4991 {
4992 all = regsets_store_inferior_registers (regs_info->regsets_info,
4993 regcache);
4994 if (regs_info->usrregs != NULL)
4995 usr_store_inferior_registers (regs_info, regcache, regno, all);
4996 }
4997 else
4998 {
4999 use_regsets = linux_register_in_regsets (regs_info, regno);
5000 if (use_regsets)
5001 all = regsets_store_inferior_registers (regs_info->regsets_info,
5002 regcache);
5003 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5004 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5005 }
5006 }
5007
5008
5009 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5010 to debugger memory starting at MYADDR. */
5011
5012 static int
5013 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5014 {
5015 int pid = lwpid_of (current_thread);
5016 register PTRACE_XFER_TYPE *buffer;
5017 register CORE_ADDR addr;
5018 register int count;
5019 char filename[64];
5020 register int i;
5021 int ret;
5022 int fd;
5023
5024 /* Try using /proc. Don't bother for one word. */
5025 if (len >= 3 * sizeof (long))
5026 {
5027 int bytes;
5028
5029 /* We could keep this file open and cache it - possibly one per
5030 thread. That requires some juggling, but is even faster. */
5031 sprintf (filename, "/proc/%d/mem", pid);
5032 fd = open (filename, O_RDONLY | O_LARGEFILE);
5033 if (fd == -1)
5034 goto no_proc;
5035
5036 /* If pread64 is available, use it. It's faster if the kernel
5037 supports it (only one syscall), and it's 64-bit safe even on
5038 32-bit platforms (for instance, SPARC debugging a SPARC64
5039 application). */
5040 #ifdef HAVE_PREAD64
5041 bytes = pread64 (fd, myaddr, len, memaddr);
5042 #else
5043 bytes = -1;
5044 if (lseek (fd, memaddr, SEEK_SET) != -1)
5045 bytes = read (fd, myaddr, len);
5046 #endif
5047
5048 close (fd);
5049 if (bytes == len)
5050 return 0;
5051
5052 /* Some data was read, we'll try to get the rest with ptrace. */
5053 if (bytes > 0)
5054 {
5055 memaddr += bytes;
5056 myaddr += bytes;
5057 len -= bytes;
5058 }
5059 }
5060
5061 no_proc:
5062 /* Round starting address down to longword boundary. */
5063 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5064 /* Round ending address up; get number of longwords that makes. */
5065 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5066 / sizeof (PTRACE_XFER_TYPE));
5067 /* Allocate buffer of that many longwords. */
5068 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5069
5070 /* Read all the longwords */
5071 errno = 0;
5072 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5073 {
5074 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5075 about coercing an 8 byte integer to a 4 byte pointer. */
5076 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5077 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5078 (PTRACE_TYPE_ARG4) 0);
5079 if (errno)
5080 break;
5081 }
5082 ret = errno;
5083
5084 /* Copy appropriate bytes out of the buffer. */
5085 if (i > 0)
5086 {
5087 i *= sizeof (PTRACE_XFER_TYPE);
5088 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5089 memcpy (myaddr,
5090 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5091 i < len ? i : len);
5092 }
5093
5094 return ret;
5095 }
5096
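/* The comment inside linux_read_memory notes that the /proc/PID/mem
file descriptor could be kept open and cached. A minimal sketch of
that idea, with a hypothetical one-entry cache; the static variables
and function name are illustrative, not gdbserver's: */
#if 0
static int cached_mem_fd = -1;
static int cached_mem_pid = -1;

static int
example_proc_mem_fd (int pid)
{
  char filename[64];

  if (cached_mem_pid == pid && cached_mem_fd != -1)
    return cached_mem_fd;

  if (cached_mem_fd != -1)
    close (cached_mem_fd);

  sprintf (filename, "/proc/%d/mem", pid);
  cached_mem_fd = open (filename, O_RDONLY | O_LARGEFILE);
  cached_mem_pid = pid;
  return cached_mem_fd;  /* -1 on failure, as with open. */
}
#endif
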
5097 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5098 memory at MEMADDR. On failure (cannot write to the inferior)
5099 returns the value of errno. Always succeeds if LEN is zero. */
5100
5101 static int
5102 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5103 {
5104 register int i;
5105 /* Round starting address down to longword boundary. */
5106 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5107 /* Round ending address up; get number of longwords that makes. */
5108 register int count
5109 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5110 / sizeof (PTRACE_XFER_TYPE);
5111
5112 /* Allocate buffer of that many longwords. */
5113 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5114 alloca (count * sizeof (PTRACE_XFER_TYPE));
5115
5116 int pid = lwpid_of (current_thread);
5117
5118 if (len == 0)
5119 {
5120 /* Zero length write always succeeds. */
5121 return 0;
5122 }
5123
5124 if (debug_threads)
5125 {
5126 /* Dump up to four bytes. */
5127 unsigned int val = * (unsigned int *) myaddr;
5128 if (len == 1)
5129 val = val & 0xff;
5130 else if (len == 2)
5131 val = val & 0xffff;
5132 else if (len == 3)
5133 val = val & 0xffffff;
5134 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5135 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5136 }
5137
5138 /* Fill start and end extra bytes of buffer with existing memory data. */
5139
5140 errno = 0;
5141 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5142 about coercing an 8 byte integer to a 4 byte pointer. */
5143 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5144 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5145 (PTRACE_TYPE_ARG4) 0);
5146 if (errno)
5147 return errno;
5148
5149 if (count > 1)
5150 {
5151 errno = 0;
5152 buffer[count - 1]
5153 = ptrace (PTRACE_PEEKTEXT, pid,
5154 /* Coerce to a uintptr_t first to avoid potential gcc warning
5155 about coercing an 8 byte integer to a 4 byte pointer. */
5156 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5157 * sizeof (PTRACE_XFER_TYPE)),
5158 (PTRACE_TYPE_ARG4) 0);
5159 if (errno)
5160 return errno;
5161 }
5162
5163 /* Copy data to be written over corresponding part of buffer. */
5164
5165 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5166 myaddr, len);
5167
5168 /* Write the entire buffer. */
5169
5170 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5171 {
5172 errno = 0;
5173 ptrace (PTRACE_POKETEXT, pid,
5174 /* Coerce to a uintptr_t first to avoid potential gcc warning
5175 about coercing an 8 byte integer to a 4 byte pointer. */
5176 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5177 (PTRACE_TYPE_ARG4) buffer[i]);
5178 if (errno)
5179 return errno;
5180 }
5181
5182 return 0;
5183 }
5184
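/* Boundary handling in linux_write_memory above, by example: with
sizeof (PTRACE_XFER_TYPE) == 8, writing len == 5 bytes at
memaddr == 0x1006 gives addr == 0x1000 and count == 2. The first
and last words are pre-read so that bytes 0x1000-0x1005 and
0x100b-0x100f keep their old contents when the two full words are
poked back. */
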
5185 static void
5186 linux_look_up_symbols (void)
5187 {
5188 #ifdef USE_THREAD_DB
5189 struct process_info *proc = current_process ();
5190
5191 if (proc->priv->thread_db != NULL)
5192 return;
5193
5194 /* If the kernel supports tracing clones, then we don't need to
5195 use the magic thread event breakpoint to learn about
5196 threads. */
5197 thread_db_init (!linux_supports_traceclone ());
5198 #endif
5199 }
5200
5201 static void
5202 linux_request_interrupt (void)
5203 {
5204 extern unsigned long signal_pid;
5205
5206 /* Send a SIGINT to the process group. This acts just like the user
5207 typed a ^C on the controlling terminal. */
5208 kill (-signal_pid, SIGINT);
5209 }
5210
5211 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5212 to debugger memory starting at MYADDR. */
5213
5214 static int
5215 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5216 {
5217 char filename[PATH_MAX];
5218 int fd, n;
5219 int pid = lwpid_of (current_thread);
5220
5221 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5222
5223 fd = open (filename, O_RDONLY);
5224 if (fd < 0)
5225 return -1;
5226
5227 if (offset != (CORE_ADDR) 0
5228 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5229 n = -1;
5230 else
5231 n = read (fd, myaddr, len);
5232
5233 close (fd);
5234
5235 return n;
5236 }
5237
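/* What a consumer of the bytes returned by linux_read_auxv sees
(illustrative sketch): for a 64-bit inferior the buffer is an array
of Elf64_auxv_t records terminated by an AT_NULL entry. E.g., to
find the program entry point in a buffer BUF: */
#if 0
  Elf64_auxv_t *av;

  for (av = (Elf64_auxv_t *) buf; av->a_type != AT_NULL; av++)
    if (av->a_type == AT_ENTRY)
      printf ("entry point: 0x%llx\n",
              (unsigned long long) av->a_un.a_val);
#endif
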
5238 /* These breakpoint and watchpoint related wrapper functions simply
5239 pass on the function call if the target has registered a
5240 corresponding function. */
5241
5242 static int
5243 linux_supports_z_point_type (char z_type)
5244 {
5245 return (the_low_target.supports_z_point_type != NULL
5246 && the_low_target.supports_z_point_type (z_type));
5247 }
5248
5249 static int
5250 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5251 int size, struct raw_breakpoint *bp)
5252 {
5253 if (type == raw_bkpt_type_sw)
5254 return insert_memory_breakpoint (bp);
5255 else if (the_low_target.insert_point != NULL)
5256 return the_low_target.insert_point (type, addr, size, bp);
5257 else
5258 /* Unsupported (see target.h). */
5259 return 1;
5260 }
5261
5262 static int
5263 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5264 int size, struct raw_breakpoint *bp)
5265 {
5266 if (type == raw_bkpt_type_sw)
5267 return remove_memory_breakpoint (bp);
5268 else if (the_low_target.remove_point != NULL)
5269 return the_low_target.remove_point (type, addr, size, bp);
5270 else
5271 /* Unsupported (see target.h). */
5272 return 1;
5273 }
5274
5275 /* Implement the to_stopped_by_sw_breakpoint target_ops
5276 method. */
5277
5278 static int
5279 linux_stopped_by_sw_breakpoint (void)
5280 {
5281 struct lwp_info *lwp = get_thread_lwp (current_thread);
5282
5283 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5284 }
5285
5286 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5287 method. */
5288
5289 static int
5290 linux_supports_stopped_by_sw_breakpoint (void)
5291 {
5292 return USE_SIGTRAP_SIGINFO;
5293 }
5294
5295 /* Implement the to_stopped_by_hw_breakpoint target_ops
5296 method. */
5297
5298 static int
5299 linux_stopped_by_hw_breakpoint (void)
5300 {
5301 struct lwp_info *lwp = get_thread_lwp (current_thread);
5302
5303 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5304 }
5305
5306 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5307 method. */
5308
5309 static int
5310 linux_supports_stopped_by_hw_breakpoint (void)
5311 {
5312 return USE_SIGTRAP_SIGINFO;
5313 }
5314
5315 /* Implement the supports_conditional_breakpoints target_ops
5316 method. */
5317
5318 static int
5319 linux_supports_conditional_breakpoints (void)
5320 {
5321 /* GDBserver needs to step over the breakpoint if the condition is
5322 false. GDBserver software single step is too simple, so disable
5323 conditional breakpoints if the target doesn't have hardware single
5324 step. */
5325 return can_hardware_single_step ();
5326 }
5327
5328 static int
5329 linux_stopped_by_watchpoint (void)
5330 {
5331 struct lwp_info *lwp = get_thread_lwp (current_thread);
5332
5333 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5334 }
5335
5336 static CORE_ADDR
5337 linux_stopped_data_address (void)
5338 {
5339 struct lwp_info *lwp = get_thread_lwp (current_thread);
5340
5341 return lwp->stopped_data_address;
5342 }
5343
5344 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5345 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5346 && defined(PT_TEXT_END_ADDR)
5347
5348 /* This is only used for targets that define PT_TEXT_ADDR,
5349 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5350 the target has different ways of acquiring this information, like
5351 loadmaps. */
5352
5353 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5354 to tell gdb about. */
5355
5356 static int
5357 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5358 {
5359 unsigned long text, text_end, data;
5360 int pid = lwpid_of (current_thread);
5361
5362 errno = 0;
5363
5364 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5365 (PTRACE_TYPE_ARG4) 0);
5366 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5367 (PTRACE_TYPE_ARG4) 0);
5368 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5369 (PTRACE_TYPE_ARG4) 0);
5370
5371 if (errno == 0)
5372 {
5373 /* Both text and data offsets produced at compile-time (and so
5374 used by gdb) are relative to the beginning of the program,
5375 with the data segment immediately following the text segment.
5376 However, the actual runtime layout in memory may put the data
5377 somewhere else, so when we send gdb a data base-address, we
5378 use the real data base address and subtract the compile-time
5379 data base-address from it (which is just the length of the
5380 text segment). BSS immediately follows data in both
5381 cases. */
5382 *text_p = text;
5383 *data_p = data - (text_end - text);
5384
5385 return 1;
5386 }
5387 return 0;
5388 }
5389 #endif
5390
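/* Worked example of the relocation above, with illustrative
addresses: if the kernel reports text == 0x40000000,
text_end == 0x40010000 and data == 0x40100000, the text segment is
0x10000 bytes long and the data base address reported to GDB is
0x40100000 - 0x10000 == 0x400f0000, i.e. where a zero-based image
whose data immediately followed its text would have to be based for
its data to land at the real runtime address. */
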
5391 static int
5392 linux_qxfer_osdata (const char *annex,
5393 unsigned char *readbuf, unsigned const char *writebuf,
5394 CORE_ADDR offset, int len)
5395 {
5396 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5397 }
5398
5399 /* Convert a native/host siginfo object, into/from the siginfo in the
5400 layout of the inferiors' architecture. */
5401
5402 static void
5403 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5404 {
5405 int done = 0;
5406
5407 if (the_low_target.siginfo_fixup != NULL)
5408 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5409
5410 /* If there was no callback, or the callback didn't do anything,
5411 then just do a straight memcpy. */
5412 if (!done)
5413 {
5414 if (direction == 1)
5415 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5416 else
5417 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5418 }
5419 }
5420
5421 static int
5422 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5423 unsigned const char *writebuf, CORE_ADDR offset, int len)
5424 {
5425 int pid;
5426 siginfo_t siginfo;
5427 char inf_siginfo[sizeof (siginfo_t)];
5428
5429 if (current_thread == NULL)
5430 return -1;
5431
5432 pid = lwpid_of (current_thread);
5433
5434 if (debug_threads)
5435 debug_printf ("%s siginfo for lwp %d.\n",
5436 readbuf != NULL ? "Reading" : "Writing",
5437 pid);
5438
5439 if (offset >= sizeof (siginfo))
5440 return -1;
5441
5442 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5443 return -1;
5444
5445 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5446 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5447 inferior with a 64-bit GDBSERVER should look the same as debugging it
5448 with a 32-bit GDBSERVER, we need to convert it. */
5449 siginfo_fixup (&siginfo, inf_siginfo, 0);
5450
5451 if (offset + len > sizeof (siginfo))
5452 len = sizeof (siginfo) - offset;
5453
5454 if (readbuf != NULL)
5455 memcpy (readbuf, inf_siginfo + offset, len);
5456 else
5457 {
5458 memcpy (inf_siginfo + offset, writebuf, len);
5459
5460 /* Convert back to ptrace layout before flushing it out. */
5461 siginfo_fixup (&siginfo, inf_siginfo, 1);
5462
5463 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5464 return -1;
5465 }
5466
5467 return len;
5468 }
5469
5470 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5471 it lets us notice when children change state; and it acts as the
5472 handler for the sigsuspend in my_waitpid. */
5473
5474 static void
5475 sigchld_handler (int signo)
5476 {
5477 int old_errno = errno;
5478
5479 if (debug_threads)
5480 {
5481 do
5482 {
5483 /* fprintf is not async-signal-safe, so call write
5484 directly. */
5485 if (write (2, "sigchld_handler\n",
5486 sizeof ("sigchld_handler\n") - 1) < 0)
5487 break; /* just ignore */
5488 } while (0);
5489 }
5490
5491 if (target_is_async_p ())
5492 async_file_mark (); /* trigger a linux_wait */
5493
5494 errno = old_errno;
5495 }
5496
5497 static int
5498 linux_supports_non_stop (void)
5499 {
5500 return 1;
5501 }
5502
5503 static int
5504 linux_async (int enable)
5505 {
5506 int previous = target_is_async_p ();
5507
5508 if (debug_threads)
5509 debug_printf ("linux_async (%d), previous=%d\n",
5510 enable, previous);
5511
5512 if (previous != enable)
5513 {
5514 sigset_t mask;
5515 sigemptyset (&mask);
5516 sigaddset (&mask, SIGCHLD);
5517
5518 sigprocmask (SIG_BLOCK, &mask, NULL);
5519
5520 if (enable)
5521 {
5522 if (pipe (linux_event_pipe) == -1)
5523 {
5524 linux_event_pipe[0] = -1;
5525 linux_event_pipe[1] = -1;
5526 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5527
5528 warning ("creating event pipe failed.");
5529 return previous;
5530 }
5531
5532 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5533 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5534
5535 /* Register the event loop handler. */
5536 add_file_handler (linux_event_pipe[0],
5537 handle_target_event, NULL);
5538
5539 /* Always trigger a linux_wait. */
5540 async_file_mark ();
5541 }
5542 else
5543 {
5544 delete_file_handler (linux_event_pipe[0]);
5545
5546 close (linux_event_pipe[0]);
5547 close (linux_event_pipe[1]);
5548 linux_event_pipe[0] = -1;
5549 linux_event_pipe[1] = -1;
5550 }
5551
5552 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5553 }
5554
5555 return previous;
5556 }
5557
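/* sigchld_handler and the pipe set up in linux_async above form the
classic self-pipe trick: the only async-signal-safe way to wake an
event loop from a signal handler is to write a byte to a
non-blocking pipe the loop watches. Freestanding sketch of the
pattern (event_pipe and the function name are illustrative): */
#if 0
static int event_pipe[2];

static void
example_sigchld (int signo)
{
  int save_errno = errno;

  /* write is async-signal-safe; fprintf and friends are not. */
  if (write (event_pipe[1], "+", 1) < 0)
    ;  /* Nothing useful to do on failure here. */
  errno = save_errno;
}

/* The main loop selects/polls on event_pipe[0]; when it becomes
   readable, it drains the pipe and reaps children with
   waitpid (..., WNOHANG). */
#endif
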
5558 static int
5559 linux_start_non_stop (int nonstop)
5560 {
5561 /* Register or unregister from event-loop accordingly. */
5562 linux_async (nonstop);
5563
5564 if (target_is_async_p () != (nonstop != 0))
5565 return -1;
5566
5567 return 0;
5568 }
5569
5570 static int
5571 linux_supports_multi_process (void)
5572 {
5573 return 1;
5574 }
5575
5576 /* Check if fork events are supported. */
5577
5578 static int
5579 linux_supports_fork_events (void)
5580 {
5581 return linux_supports_tracefork ();
5582 }
5583
5584 /* Check if vfork events are supported. */
5585
5586 static int
5587 linux_supports_vfork_events (void)
5588 {
5589 return linux_supports_tracefork ();
5590 }
5591
5592 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5593 options for the specified lwp. */
5594
5595 static int
5596 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5597 void *args)
5598 {
5599 struct thread_info *thread = (struct thread_info *) entry;
5600 struct lwp_info *lwp = get_thread_lwp (thread);
5601
5602 if (!lwp->stopped)
5603 {
5604 /* Stop the lwp so we can modify its ptrace options. */
5605 lwp->must_set_ptrace_flags = 1;
5606 linux_stop_lwp (lwp);
5607 }
5608 else
5609 {
5610 /* Already stopped; go ahead and set the ptrace options. */
5611 struct process_info *proc = find_process_pid (pid_of (thread));
5612 int options = linux_low_ptrace_options (proc->attached);
5613
5614 linux_enable_event_reporting (lwpid_of (thread), options);
5615 lwp->must_set_ptrace_flags = 0;
5616 }
5617
5618 return 0;
5619 }
5620
5621 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5622 ptrace flags for all inferiors. This is in case the new GDB connection
5623 doesn't support the same set of events that the previous one did. */
5624
5625 static void
5626 linux_handle_new_gdb_connection (void)
5627 {
5628 pid_t pid;
5629
5630 /* Request that all the lwps reset their ptrace options. */
5631 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5632 }
5633
5634 static int
5635 linux_supports_disable_randomization (void)
5636 {
5637 #ifdef HAVE_PERSONALITY
5638 return 1;
5639 #else
5640 return 0;
5641 #endif
5642 }
5643
5644 static int
5645 linux_supports_agent (void)
5646 {
5647 return 1;
5648 }
5649
5650 static int
5651 linux_supports_range_stepping (void)
5652 {
5653 if (*the_low_target.supports_range_stepping == NULL)
5654 return 0;
5655
5656 return (*the_low_target.supports_range_stepping) ();
5657 }
5658
5659 /* Enumerate spufs IDs for process PID. */
5660 static int
5661 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5662 {
5663 int pos = 0;
5664 int written = 0;
5665 char path[128];
5666 DIR *dir;
5667 struct dirent *entry;
5668
5669 sprintf (path, "/proc/%ld/fd", pid);
5670 dir = opendir (path);
5671 if (!dir)
5672 return -1;
5673
5674 rewinddir (dir);
5675 while ((entry = readdir (dir)) != NULL)
5676 {
5677 struct stat st;
5678 struct statfs stfs;
5679 int fd;
5680
5681 fd = atoi (entry->d_name);
5682 if (!fd)
5683 continue;
5684
5685 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5686 if (stat (path, &st) != 0)
5687 continue;
5688 if (!S_ISDIR (st.st_mode))
5689 continue;
5690
5691 if (statfs (path, &stfs) != 0)
5692 continue;
5693 if (stfs.f_type != SPUFS_MAGIC)
5694 continue;
5695
5696 if (pos >= offset && pos + 4 <= offset + len)
5697 {
5698 *(unsigned int *)(buf + pos - offset) = fd;
5699 written += 4;
5700 }
5701 pos += 4;
5702 }
5703
5704 closedir (dir);
5705 return written;
5706 }
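
/* Editor's note: the reply built above is a packed array of 32-bit
   SPU context IDs in host byte order, windowed by OFFSET/LEN.  For
   example (hypothetical), a request with offset 4 and length 8 against
   three contexts returns the second and third IDs only.  */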
5707
5708 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5709 object type, using the /proc file system. */
5710 static int
5711 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5712 unsigned const char *writebuf,
5713 CORE_ADDR offset, int len)
5714 {
5715 long pid = lwpid_of (current_thread);
5716 char buf[128];
5717 int fd = 0;
5718 int ret = 0;
5719
5720 if (!writebuf && !readbuf)
5721 return -1;
5722
5723 if (!*annex)
5724 {
5725 if (!readbuf)
5726 return -1;
5727 else
5728 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5729 }
5730
5731 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5732 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5733 if (fd <= 0)
5734 return -1;
5735
5736 if (offset != 0
5737 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5738 {
5739 close (fd);
5740 return 0;
5741 }
5742
5743 if (writebuf)
5744 ret = write (fd, writebuf, (size_t) len);
5745 else
5746 ret = read (fd, readbuf, (size_t) len);
5747
5748 close (fd);
5749 return ret;
5750 }
5751
5752 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5753 struct target_loadseg
5754 {
5755 /* Core address to which the segment is mapped. */
5756 Elf32_Addr addr;
5757 /* VMA recorded in the program header. */
5758 Elf32_Addr p_vaddr;
5759 /* Size of this segment in memory. */
5760 Elf32_Word p_memsz;
5761 };
5762
5763 # if defined PT_GETDSBT
5764 struct target_loadmap
5765 {
5766 /* Protocol version number, must be zero. */
5767 Elf32_Word version;
5768 /* Pointer to the DSBT table, its size, and the DSBT index. */
5769 unsigned *dsbt_table;
5770 unsigned dsbt_size, dsbt_index;
5771 /* Number of segments in this map. */
5772 Elf32_Word nsegs;
5773 /* The actual memory map. */
5774 struct target_loadseg segs[/*nsegs*/];
5775 };
5776 # define LINUX_LOADMAP PT_GETDSBT
5777 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5778 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5779 # else
5780 struct target_loadmap
5781 {
5782 /* Protocol version number, must be zero. */
5783 Elf32_Half version;
5784 /* Number of segments in this map. */
5785 Elf32_Half nsegs;
5786 /* The actual memory map. */
5787 struct target_loadseg segs[/*nsegs*/];
5788 };
5789 # define LINUX_LOADMAP PTRACE_GETFDPIC
5790 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5791 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5792 # endif
5793
5794 static int
5795 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5796 unsigned char *myaddr, unsigned int len)
5797 {
5798 int pid = lwpid_of (current_thread);
5799 int addr = -1;
5800 struct target_loadmap *data = NULL;
5801 unsigned int actual_length, copy_length;
5802
5803 if (strcmp (annex, "exec") == 0)
5804 addr = (int) LINUX_LOADMAP_EXEC;
5805 else if (strcmp (annex, "interp") == 0)
5806 addr = (int) LINUX_LOADMAP_INTERP;
5807 else
5808 return -1;
5809
5810 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5811 return -1;
5812
5813 if (data == NULL)
5814 return -1;
5815
5816 actual_length = sizeof (struct target_loadmap)
5817 + sizeof (struct target_loadseg) * data->nsegs;
5818
5819 if (offset < 0 || offset > actual_length)
5820 return -1;
5821
5822 copy_length = actual_length - offset < len ? actual_length - offset : len;
5823 memcpy (myaddr, (char *) data + offset, copy_length);
5824 return copy_length;
5825 }
5826 #else
5827 # define linux_read_loadmap NULL
5828 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5829
5830 static void
5831 linux_process_qsupported (const char *query)
5832 {
5833 if (the_low_target.process_qsupported != NULL)
5834 the_low_target.process_qsupported (query);
5835 }
5836
5837 static int
5838 linux_supports_tracepoints (void)
5839 {
5840 if (*the_low_target.supports_tracepoints == NULL)
5841 return 0;
5842
5843 return (*the_low_target.supports_tracepoints) ();
5844 }
5845
5846 static CORE_ADDR
5847 linux_read_pc (struct regcache *regcache)
5848 {
5849 if (the_low_target.get_pc == NULL)
5850 return 0;
5851
5852 return (*the_low_target.get_pc) (regcache);
5853 }
5854
5855 static void
5856 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5857 {
5858 gdb_assert (the_low_target.set_pc != NULL);
5859
5860 (*the_low_target.set_pc) (regcache, pc);
5861 }
5862
5863 static int
5864 linux_thread_stopped (struct thread_info *thread)
5865 {
5866 return get_thread_lwp (thread)->stopped;
5867 }
5868
5869 /* This exposes stop-all-threads functionality to other modules. */
5870
5871 static void
5872 linux_pause_all (int freeze)
5873 {
5874 stop_all_lwps (freeze, NULL);
5875 }
5876
5877 /* This exposes unstop-all-threads functionality to other gdbserver
5878 modules. */
5879
5880 static void
5881 linux_unpause_all (int unfreeze)
5882 {
5883 unstop_all_lwps (unfreeze, NULL);
5884 }
5885
5886 static int
5887 linux_prepare_to_access_memory (void)
5888 {
5889 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5890 running LWP. */
5891 if (non_stop)
5892 linux_pause_all (1);
5893 return 0;
5894 }
5895
5896 static void
5897 linux_done_accessing_memory (void)
5898 {
5899 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5900 running LWP. */
5901 if (non_stop)
5902 linux_unpause_all (1);
5903 }
5904
5905 static int
5906 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5907 CORE_ADDR collector,
5908 CORE_ADDR lockaddr,
5909 ULONGEST orig_size,
5910 CORE_ADDR *jump_entry,
5911 CORE_ADDR *trampoline,
5912 ULONGEST *trampoline_size,
5913 unsigned char *jjump_pad_insn,
5914 ULONGEST *jjump_pad_insn_size,
5915 CORE_ADDR *adjusted_insn_addr,
5916 CORE_ADDR *adjusted_insn_addr_end,
5917 char *err)
5918 {
5919 return (*the_low_target.install_fast_tracepoint_jump_pad)
5920 (tpoint, tpaddr, collector, lockaddr, orig_size,
5921 jump_entry, trampoline, trampoline_size,
5922 jjump_pad_insn, jjump_pad_insn_size,
5923 adjusted_insn_addr, adjusted_insn_addr_end,
5924 err);
5925 }
5926
5927 static struct emit_ops *
5928 linux_emit_ops (void)
5929 {
5930 if (the_low_target.emit_ops != NULL)
5931 return (*the_low_target.emit_ops) ();
5932 else
5933 return NULL;
5934 }
5935
5936 static int
5937 linux_get_min_fast_tracepoint_insn_len (void)
5938 {
5939 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5940 }
5941
5942 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5943
5944 static int
5945 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5946 CORE_ADDR *phdr_memaddr, int *num_phdr)
5947 {
5948 char filename[PATH_MAX];
5949 int fd;
5950 const int auxv_size = is_elf64
5951 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5952 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5953
5954 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5955
5956 fd = open (filename, O_RDONLY);
5957 if (fd < 0)
5958 return 1;
5959
5960 *phdr_memaddr = 0;
5961 *num_phdr = 0;
5962 while (read (fd, buf, auxv_size) == auxv_size
5963 && (*phdr_memaddr == 0 || *num_phdr == 0))
5964 {
5965 if (is_elf64)
5966 {
5967 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5968
5969 switch (aux->a_type)
5970 {
5971 case AT_PHDR:
5972 *phdr_memaddr = aux->a_un.a_val;
5973 break;
5974 case AT_PHNUM:
5975 *num_phdr = aux->a_un.a_val;
5976 break;
5977 }
5978 }
5979 else
5980 {
5981 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5982
5983 switch (aux->a_type)
5984 {
5985 case AT_PHDR:
5986 *phdr_memaddr = aux->a_un.a_val;
5987 break;
5988 case AT_PHNUM:
5989 *num_phdr = aux->a_un.a_val;
5990 break;
5991 }
5992 }
5993 }
5994
5995 close (fd);
5996
5997 if (*phdr_memaddr == 0 || *num_phdr == 0)
5998 {
5999 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6000 "phdr_memaddr = %ld, phdr_num = %d",
6001 (long) *phdr_memaddr, *num_phdr);
6002 return 2;
6003 }
6004
6005 return 0;
6006 }
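
/* Editor's note: a process can read its own auxiliary vector with
   glibc's getauxval (glibc >= 2.16); the loop above exists because
   gdbserver must read another process's vector via /proc/PID/auxv.
   A minimal in-process sketch, assuming <sys/auxv.h>:

     unsigned long phdr = getauxval (AT_PHDR);
     unsigned long phnum = getauxval (AT_PHNUM);  */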
6007
6008 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6009
6010 static CORE_ADDR
6011 get_dynamic (const int pid, const int is_elf64)
6012 {
6013 CORE_ADDR phdr_memaddr, relocation;
6014 int num_phdr, i;
6015 unsigned char *phdr_buf;
6016 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6017
6018 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6019 return 0;
6020
6021 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6022 phdr_buf = alloca (num_phdr * phdr_size);
6023
6024 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6025 return 0;
6026
6027 /* Compute relocation: it is expected to be 0 for "regular" executables,
6028 non-zero for PIE ones. */
6029 relocation = -1;
6030 for (i = 0; relocation == -1 && i < num_phdr; i++)
6031 if (is_elf64)
6032 {
6033 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6034
6035 if (p->p_type == PT_PHDR)
6036 relocation = phdr_memaddr - p->p_vaddr;
6037 }
6038 else
6039 {
6040 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6041
6042 if (p->p_type == PT_PHDR)
6043 relocation = phdr_memaddr - p->p_vaddr;
6044 }
6045
6046 if (relocation == -1)
6047 {
6048 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6049 all real-world executables, including PIE executables, always have
6050 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6051 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6052 provides DT_DEBUG anyway (fpc binaries are statically linked).
6053
6054 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6055
6056 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6057
6058 return 0;
6059 }
6060
6061 for (i = 0; i < num_phdr; i++)
6062 {
6063 if (is_elf64)
6064 {
6065 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6066
6067 if (p->p_type == PT_DYNAMIC)
6068 return p->p_vaddr + relocation;
6069 }
6070 else
6071 {
6072 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6073
6074 if (p->p_type == PT_DYNAMIC)
6075 return p->p_vaddr + relocation;
6076 }
6077 }
6078
6079 return 0;
6080 }
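
/* Editor's worked example (hypothetical addresses): for a PIE loaded
   at 0x555555554000 whose PT_PHDR records p_vaddr 0x40, the kernel's
   AT_PHDR reports 0x555555554040, so
   relocation = 0x555555554040 - 0x40 = 0x555555554000,
   and a PT_DYNAMIC with p_vaddr 0x2d78 is found at
   0x2d78 + relocation = 0x555555556d78.  For a non-PIE executable the
   two values coincide and relocation is 0.  */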
6081
6082 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6083 can be 0 if the inferior does not yet have the library list initialized.
6084 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6085 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6086
6087 static CORE_ADDR
6088 get_r_debug (const int pid, const int is_elf64)
6089 {
6090 CORE_ADDR dynamic_memaddr;
6091 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6092 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6093 CORE_ADDR map = -1;
6094
6095 dynamic_memaddr = get_dynamic (pid, is_elf64);
6096 if (dynamic_memaddr == 0)
6097 return map;
6098
6099 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6100 {
6101 if (is_elf64)
6102 {
6103 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6104 #ifdef DT_MIPS_RLD_MAP
6105 union
6106 {
6107 Elf64_Xword map;
6108 unsigned char buf[sizeof (Elf64_Xword)];
6109 }
6110 rld_map;
6111
6112 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6113 {
6114 if (linux_read_memory (dyn->d_un.d_val,
6115 rld_map.buf, sizeof (rld_map.buf)) == 0)
6116 return rld_map.map;
6117 else
6118 break;
6119 }
6120 #endif /* DT_MIPS_RLD_MAP */
6121
6122 if (dyn->d_tag == DT_DEBUG && map == -1)
6123 map = dyn->d_un.d_val;
6124
6125 if (dyn->d_tag == DT_NULL)
6126 break;
6127 }
6128 else
6129 {
6130 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6131 #ifdef DT_MIPS_RLD_MAP
6132 union
6133 {
6134 Elf32_Word map;
6135 unsigned char buf[sizeof (Elf32_Word)];
6136 }
6137 rld_map;
6138
6139 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6140 {
6141 if (linux_read_memory (dyn->d_un.d_val,
6142 rld_map.buf, sizeof (rld_map.buf)) == 0)
6143 return rld_map.map;
6144 else
6145 break;
6146 }
6147 #endif /* DT_MIPS_RLD_MAP */
6148
6149 if (dyn->d_tag == DT_DEBUG && map == -1)
6150 map = dyn->d_un.d_val;
6151
6152 if (dyn->d_tag == DT_NULL)
6153 break;
6154 }
6155
6156 dynamic_memaddr += dyn_size;
6157 }
6158
6159 return map;
6160 }
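
/* Editor's sketch (not part of the original source): the same DT_DEBUG
   scan performed in-process, where the _DYNAMIC symbol declared by
   <link.h> replaces the remote reads.  Assumes <link.h> for ElfW and
   _DYNAMIC; the function name is hypothetical.  */

extern ElfW(Dyn) _DYNAMIC[];	/* Redeclared here for clarity.  */

static ElfW(Addr)
example_find_dt_debug (void)
{
  ElfW(Dyn) *dyn;

  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      return dyn->d_un.d_ptr;	/* Filled in by the dynamic linker.  */
  return 0;
}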
6161
6162 /* Read one pointer from MEMADDR in the inferior. */
6163
6164 static int
6165 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6166 {
6167 int ret;
6168
6169 /* Go through a union so this works on either big or little endian
6170 hosts, when the inferior's pointer size is smaller than the size
6171 of CORE_ADDR. It is assumed the inferior's endianness is the
6172 same as the superior's.  */
6173 union
6174 {
6175 CORE_ADDR core_addr;
6176 unsigned int ui;
6177 unsigned char uc;
6178 } addr;
6179
6180 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6181 if (ret == 0)
6182 {
6183 if (ptr_size == sizeof (CORE_ADDR))
6184 *ptr = addr.core_addr;
6185 else if (ptr_size == sizeof (unsigned int))
6186 *ptr = addr.ui;
6187 else
6188 gdb_assert_not_reached ("unhandled pointer size");
6189 }
6190 return ret;
6191 }
6192
6193 struct link_map_offsets
6194 {
6195 /* Offset and size of r_debug.r_version. */
6196 int r_version_offset;
6197
6198 /* Offset and size of r_debug.r_map. */
6199 int r_map_offset;
6200
6201 /* Offset to l_addr field in struct link_map. */
6202 int l_addr_offset;
6203
6204 /* Offset to l_name field in struct link_map. */
6205 int l_name_offset;
6206
6207 /* Offset to l_ld field in struct link_map. */
6208 int l_ld_offset;
6209
6210 /* Offset to l_next field in struct link_map. */
6211 int l_next_offset;
6212
6213 /* Offset to l_prev field in struct link_map. */
6214 int l_prev_offset;
6215 };
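
/* Editor's note: these offsets mirror glibc's <link.h> layouts.  On a
   64-bit target, struct r_debug begins with "int r_version" followed
   (after padding) by "struct link_map *r_map" at offset 8, and struct
   link_map lays out l_addr, l_name, l_ld, l_next and l_prev at offsets
   0, 8, 16, 24 and 32, matching lmo_64bit_offsets below.  */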
6216
6217 /* Construct qXfer:libraries-svr4:read reply. */
6218
6219 static int
6220 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6221 unsigned const char *writebuf,
6222 CORE_ADDR offset, int len)
6223 {
6224 char *document;
6225 unsigned document_len;
6226 struct process_info_private *const priv = current_process ()->priv;
6227 char filename[PATH_MAX];
6228 int pid, is_elf64;
6229
6230 static const struct link_map_offsets lmo_32bit_offsets =
6231 {
6232 0, /* r_version offset. */
6233 4, /* r_debug.r_map offset. */
6234 0, /* l_addr offset in link_map. */
6235 4, /* l_name offset in link_map. */
6236 8, /* l_ld offset in link_map. */
6237 12, /* l_next offset in link_map. */
6238 16 /* l_prev offset in link_map. */
6239 };
6240
6241 static const struct link_map_offsets lmo_64bit_offsets =
6242 {
6243 0, /* r_version offset. */
6244 8, /* r_debug.r_map offset. */
6245 0, /* l_addr offset in link_map. */
6246 8, /* l_name offset in link_map. */
6247 16, /* l_ld offset in link_map. */
6248 24, /* l_next offset in link_map. */
6249 32 /* l_prev offset in link_map. */
6250 };
6251 const struct link_map_offsets *lmo;
6252 unsigned int machine;
6253 int ptr_size;
6254 CORE_ADDR lm_addr = 0, lm_prev = 0;
6255 int allocated = 1024;
6256 char *p;
6257 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6258 int header_done = 0;
6259
6260 if (writebuf != NULL)
6261 return -2;
6262 if (readbuf == NULL)
6263 return -1;
6264
6265 pid = lwpid_of (current_thread);
6266 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6267 is_elf64 = elf_64_file_p (filename, &machine);
6268 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6269 ptr_size = is_elf64 ? 8 : 4;
6270
6271 while (annex[0] != '\0')
6272 {
6273 const char *sep;
6274 CORE_ADDR *addrp;
6275 int len;
6276
6277 sep = strchr (annex, '=');
6278 if (sep == NULL)
6279 break;
6280
6281 len = sep - annex;
6282 if (len == 5 && startswith (annex, "start"))
6283 addrp = &lm_addr;
6284 else if (len == 4 && startswith (annex, "prev"))
6285 addrp = &lm_prev;
6286 else
6287 {
6288 annex = strchr (sep, ';');
6289 if (annex == NULL)
6290 break;
6291 annex++;
6292 continue;
6293 }
6294
6295 annex = decode_address_to_semicolon (addrp, sep + 1);
6296 }
6297
6298 if (lm_addr == 0)
6299 {
6300 int r_version = 0;
6301
6302 if (priv->r_debug == 0)
6303 priv->r_debug = get_r_debug (pid, is_elf64);
6304
6305 /* We failed to find DT_DEBUG.  This situation will not change
6306 for this inferior, so do not retry.  Report it to GDB as
6307 E01; see GDB's solib-svr4.c for the reasons.  */
6308 if (priv->r_debug == (CORE_ADDR) -1)
6309 return -1;
6310
6311 if (priv->r_debug != 0)
6312 {
6313 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6314 (unsigned char *) &r_version,
6315 sizeof (r_version)) != 0
6316 || r_version != 1)
6317 {
6318 warning ("unexpected r_debug version %d", r_version);
6319 }
6320 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6321 &lm_addr, ptr_size) != 0)
6322 {
6323 warning ("unable to read r_map from 0x%lx",
6324 (long) priv->r_debug + lmo->r_map_offset);
6325 }
6326 }
6327 }
6328
6329 document = xmalloc (allocated);
6330 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6331 p = document + strlen (document);
6332
6333 while (lm_addr
6334 && read_one_ptr (lm_addr + lmo->l_name_offset,
6335 &l_name, ptr_size) == 0
6336 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6337 &l_addr, ptr_size) == 0
6338 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6339 &l_ld, ptr_size) == 0
6340 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6341 &l_prev, ptr_size) == 0
6342 && read_one_ptr (lm_addr + lmo->l_next_offset,
6343 &l_next, ptr_size) == 0)
6344 {
6345 unsigned char libname[PATH_MAX];
6346
6347 if (lm_prev != l_prev)
6348 {
6349 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6350 (long) lm_prev, (long) l_prev);
6351 break;
6352 }
6353
6354 /* Ignore the first entry even if it has a valid name, as the first
6355 entry corresponds to the main executable.  The first entry should
6356 not be skipped if the dynamic loader was loaded late by a static
6357 executable (see the solib-svr4.c parameter ignore_first).  But in
6358 that case the main executable has no PT_DYNAMIC present, and this
6359 function has already exited above due to a failed get_r_debug.  */
6360 if (lm_prev == 0)
6361 {
6362 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6363 p = p + strlen (p);
6364 }
6365 else
6366 {
6367 /* Not checking for error because reading may stop before
6368 we've got PATH_MAX worth of characters. */
6369 libname[0] = '\0';
6370 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6371 libname[sizeof (libname) - 1] = '\0';
6372 if (libname[0] != '\0')
6373 {
6374 /* 6x the size for xml_escape_text below. */
6375 size_t len = 6 * strlen ((char *) libname);
6376 char *name;
6377
6378 if (!header_done)
6379 {
6380 /* Terminate `<library-list-svr4'. */
6381 *p++ = '>';
6382 header_done = 1;
6383 }
6384
6385 while (allocated < p - document + len + 200)
6386 {
6387 /* Expand to guarantee sufficient storage. */
6388 uintptr_t document_len = p - document;
6389
6390 document = xrealloc (document, 2 * allocated);
6391 allocated *= 2;
6392 p = document + document_len;
6393 }
6394
6395 name = xml_escape_text ((char *) libname);
6396 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6397 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6398 name, (unsigned long) lm_addr,
6399 (unsigned long) l_addr, (unsigned long) l_ld);
6400 free (name);
6401 }
6402 }
6403
6404 lm_prev = lm_addr;
6405 lm_addr = l_next;
6406 }
6407
6408 if (!header_done)
6409 {
6410 /* Empty list; terminate `<library-list-svr4'. */
6411 strcpy (p, "/>");
6412 }
6413 else
6414 strcpy (p, "</library-list-svr4>");
6415
6416 document_len = strlen (document);
6417 if (offset < document_len)
6418 document_len -= offset;
6419 else
6420 document_len = 0;
6421 if (len > document_len)
6422 len = document_len;
6423
6424 memcpy (readbuf, document + offset, len);
6425 xfree (document);
6426
6427 return len;
6428 }
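
/* Editor's example of a reply assembled above (hypothetical addresses
   and paths):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe740" l_addr="0x7ffff7a0e000" l_ld="0x7ffff7dcdb80"/>
   </library-list-svr4>  */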
6429
6430 #ifdef HAVE_LINUX_BTRACE
6431
6432 /* See to_enable_btrace target method. */
6433
6434 static struct btrace_target_info *
6435 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6436 {
6437 struct btrace_target_info *tinfo;
6438
6439 tinfo = linux_enable_btrace (ptid, conf);
6440
6441 if (tinfo != NULL && tinfo->ptr_bits == 0)
6442 {
6443 struct thread_info *thread = find_thread_ptid (ptid);
6444 struct regcache *regcache = get_thread_regcache (thread, 0);
6445
6446 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6447 }
6448
6449 return tinfo;
6450 }
6451
6452 /* See to_disable_btrace target method. */
6453
6454 static int
6455 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6456 {
6457 enum btrace_error err;
6458
6459 err = linux_disable_btrace (tinfo);
6460 return (err == BTRACE_ERR_NONE ? 0 : -1);
6461 }
6462
6463 /* See to_read_btrace target method. */
6464
6465 static int
6466 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6467 int type)
6468 {
6469 struct btrace_data btrace;
6470 struct btrace_block *block;
6471 enum btrace_error err;
6472 int i;
6473
6474 btrace_data_init (&btrace);
6475
6476 err = linux_read_btrace (&btrace, tinfo, type);
6477 if (err != BTRACE_ERR_NONE)
6478 {
6479 if (err == BTRACE_ERR_OVERFLOW)
6480 buffer_grow_str0 (buffer, "E.Overflow.");
6481 else
6482 buffer_grow_str0 (buffer, "E.Generic Error.");
6483
6484 btrace_data_fini (&btrace);
6485 return -1;
6486 }
6487
6488 switch (btrace.format)
6489 {
6490 case BTRACE_FORMAT_NONE:
6491 buffer_grow_str0 (buffer, "E.No Trace.");
6492 break;
6493
6494 case BTRACE_FORMAT_BTS:
6495 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6496 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6497
6498 for (i = 0;
6499 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6500 i++)
6501 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6502 paddress (block->begin), paddress (block->end));
6503
6504 buffer_grow_str0 (buffer, "</btrace>\n");
6505 break;
6506
6507 default:
6508 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6509
6510 btrace_data_fini (&btrace);
6511 return -1;
6512 }
6513
6514 btrace_data_fini (&btrace);
6515 return 0;
6516 }
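
/* Editor's example of a BTS reply produced above (hypothetical
   addresses):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400530" end="0x400547"/>
   </btrace>  */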
6517
6518 /* See to_btrace_conf target method. */
6519
6520 static int
6521 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6522 struct buffer *buffer)
6523 {
6524 const struct btrace_config *conf;
6525
6526 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6527 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6528
6529 conf = linux_btrace_conf (tinfo);
6530 if (conf != NULL)
6531 {
6532 switch (conf->format)
6533 {
6534 case BTRACE_FORMAT_NONE:
6535 break;
6536
6537 case BTRACE_FORMAT_BTS:
6538 buffer_xml_printf (buffer, "<bts");
6539 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6540 buffer_xml_printf (buffer, " />\n");
6541 break;
6542 }
6543 }
6544
6545 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6546 return 0;
6547 }
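
/* Editor's example of the configuration document built above
   (hypothetical buffer size):

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>  */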
6548 #endif /* HAVE_LINUX_BTRACE */
6549
6550 /* See nat/linux-nat.h. */
6551
6552 ptid_t
6553 current_lwp_ptid (void)
6554 {
6555 return ptid_of (current_thread);
6556 }
6557
6558 static struct target_ops linux_target_ops = {
6559 linux_create_inferior,
6560 linux_attach,
6561 linux_kill,
6562 linux_detach,
6563 linux_mourn,
6564 linux_join,
6565 linux_thread_alive,
6566 linux_resume,
6567 linux_wait,
6568 linux_fetch_registers,
6569 linux_store_registers,
6570 linux_prepare_to_access_memory,
6571 linux_done_accessing_memory,
6572 linux_read_memory,
6573 linux_write_memory,
6574 linux_look_up_symbols,
6575 linux_request_interrupt,
6576 linux_read_auxv,
6577 linux_supports_z_point_type,
6578 linux_insert_point,
6579 linux_remove_point,
6580 linux_stopped_by_sw_breakpoint,
6581 linux_supports_stopped_by_sw_breakpoint,
6582 linux_stopped_by_hw_breakpoint,
6583 linux_supports_stopped_by_hw_breakpoint,
6584 linux_supports_conditional_breakpoints,
6585 linux_stopped_by_watchpoint,
6586 linux_stopped_data_address,
6587 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6588 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6589 && defined(PT_TEXT_END_ADDR)
6590 linux_read_offsets,
6591 #else
6592 NULL,
6593 #endif
6594 #ifdef USE_THREAD_DB
6595 thread_db_get_tls_address,
6596 #else
6597 NULL,
6598 #endif
6599 linux_qxfer_spu,
6600 hostio_last_error_from_errno,
6601 linux_qxfer_osdata,
6602 linux_xfer_siginfo,
6603 linux_supports_non_stop,
6604 linux_async,
6605 linux_start_non_stop,
6606 linux_supports_multi_process,
6607 linux_supports_fork_events,
6608 linux_supports_vfork_events,
6609 linux_handle_new_gdb_connection,
6610 #ifdef USE_THREAD_DB
6611 thread_db_handle_monitor_command,
6612 #else
6613 NULL,
6614 #endif
6615 linux_common_core_of_thread,
6616 linux_read_loadmap,
6617 linux_process_qsupported,
6618 linux_supports_tracepoints,
6619 linux_read_pc,
6620 linux_write_pc,
6621 linux_thread_stopped,
6622 NULL,
6623 linux_pause_all,
6624 linux_unpause_all,
6625 linux_stabilize_threads,
6626 linux_install_fast_tracepoint_jump_pad,
6627 linux_emit_ops,
6628 linux_supports_disable_randomization,
6629 linux_get_min_fast_tracepoint_insn_len,
6630 linux_qxfer_libraries_svr4,
6631 linux_supports_agent,
6632 #ifdef HAVE_LINUX_BTRACE
6633 linux_supports_btrace,
6634 linux_low_enable_btrace,
6635 linux_low_disable_btrace,
6636 linux_low_read_btrace,
6637 linux_low_btrace_conf,
6638 #else
6639 NULL,
6640 NULL,
6641 NULL,
6642 NULL,
6643 NULL,
6644 #endif
6645 linux_supports_range_stepping,
6646 linux_proc_pid_to_exec_file,
6647 };
6648
6649 static void
6650 linux_init_signals (void)
6651 {
6652 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6653 to find what the cancel signal actually is. */
6654 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6655 signal (__SIGRTMIN+1, SIG_IGN);
6656 #endif
6657 }
6658
6659 #ifdef HAVE_LINUX_REGSETS
6660 void
6661 initialize_regsets_info (struct regsets_info *info)
6662 {
6663 for (info->num_regsets = 0;
6664 info->regsets[info->num_regsets].size >= 0;
6665 info->num_regsets++)
6666 ;
6667 }
6668 #endif
6669
6670 void
6671 initialize_low (void)
6672 {
6673 struct sigaction sigchld_action;
6674 memset (&sigchld_action, 0, sizeof (sigchld_action));
6675 set_target_ops (&linux_target_ops);
6676 set_breakpoint_data (the_low_target.breakpoint,
6677 the_low_target.breakpoint_len);
6678 linux_init_signals ();
6679 linux_ptrace_init_warnings ();
6680
6681 sigchld_action.sa_handler = sigchld_handler;
6682 sigemptyset (&sigchld_action.sa_mask);
6683 sigchld_action.sa_flags = SA_RESTART;
6684 sigaction (SIGCHLD, &sigchld_action, NULL);
6685
6686 initialize_low_arch ();
6687
6688 linux_check_ptrace_features ();
6689 }