1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24
25 #include "nat/linux-nat.h"
26 #include "nat/linux-waitpid.h"
27 #include "gdb_wait.h"
28 #include <sys/ptrace.h>
29 #include "nat/linux-ptrace.h"
30 #include "nat/linux-procfs.h"
31 #include "nat/linux-personality.h"
32 #include <signal.h>
33 #include <sys/ioctl.h>
34 #include <fcntl.h>
35 #include <unistd.h>
36 #include <sys/syscall.h>
37 #include <sched.h>
38 #include <ctype.h>
39 #include <pwd.h>
40 #include <sys/types.h>
41 #include <dirent.h>
42 #include <sys/stat.h>
43 #include <sys/vfs.h>
44 #include <sys/uio.h>
45 #include "filestuff.h"
46 #include "tracepoint.h"
47 #include "hostio.h"
48 #ifndef ELFMAG0
49 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
50 then ELFMAG0 will have been defined. If it didn't get included by
51 gdb_proc_service.h then including it will likely introduce a duplicate
52 definition of elf_fpregset_t. */
53 #include <elf.h>
54 #endif
55 #include "nat/linux-namespaces.h"
56
57 #ifndef SPUFS_MAGIC
58 #define SPUFS_MAGIC 0x23c9b64e
59 #endif
60
61 #ifdef HAVE_PERSONALITY
62 # include <sys/personality.h>
63 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
64 # define ADDR_NO_RANDOMIZE 0x0040000
65 # endif
66 #endif
67
68 #ifndef O_LARGEFILE
69 #define O_LARGEFILE 0
70 #endif
71
72 #ifndef W_STOPCODE
73 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
74 #endif
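
/* Illustrative example, not part of the original file: the fallback
   W_STOPCODE above builds a wait status that the standard macros
   decode back.  With SIGTRAP == 5 on Linux, W_STOPCODE (SIGTRAP)
   yields 0x57f, and:

     int wstat = W_STOPCODE (SIGTRAP);

     gdb_assert (WIFSTOPPED (wstat));
     gdb_assert (WSTOPSIG (wstat) == SIGTRAP);
*/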
75
76 /* This is the kernel's hard limit. Not to be confused with
77 SIGRTMIN. */
78 #ifndef __SIGRTMIN
79 #define __SIGRTMIN 32
80 #endif
81
82 /* Some targets did not define these ptrace constants from the start,
83 so gdbserver defines them locally here. In the future, these may
84 be removed after they are added to asm/ptrace.h. */
85 #if !(defined(PT_TEXT_ADDR) \
86 || defined(PT_DATA_ADDR) \
87 || defined(PT_TEXT_END_ADDR))
88 #if defined(__mcoldfire__)
89 /* These are still undefined in 3.10 kernels. */
90 #define PT_TEXT_ADDR 49*4
91 #define PT_DATA_ADDR 50*4
92 #define PT_TEXT_END_ADDR 51*4
93 /* BFIN already defines these since at least 2.6.32 kernels. */
94 #elif defined(BFIN)
95 #define PT_TEXT_ADDR 220
96 #define PT_TEXT_END_ADDR 224
97 #define PT_DATA_ADDR 228
98 /* These are still undefined in 3.10 kernels. */
99 #elif defined(__TMS320C6X__)
100 #define PT_TEXT_ADDR (0x10000*4)
101 #define PT_DATA_ADDR (0x10004*4)
102 #define PT_TEXT_END_ADDR (0x10008*4)
103 #endif
104 #endif
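
/* Illustrative sketch, not part of the original file: these constants
   are offsets into the inferior's user area, so a target would fetch
   the text segment's load address from a stopped inferior PID with
   PTRACE_PEEKUSER:

     errno = 0;
     long text_addr = ptrace (PTRACE_PEEKUSER, pid,
                              (PTRACE_TYPE_ARG3) PT_TEXT_ADDR, 0);
     if (errno == 0)
       {
         ... text_addr is where the text segment was loaded ...
       }
*/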
105
106 #ifdef HAVE_LINUX_BTRACE
107 # include "nat/linux-btrace.h"
108 # include "btrace-common.h"
109 #endif
110
111 #ifndef HAVE_ELF32_AUXV_T
112 /* Copied from glibc's elf.h. */
113 typedef struct
114 {
115 uint32_t a_type; /* Entry type */
116 union
117 {
118 uint32_t a_val; /* Integer value */
119 /* We used to have pointer elements added here. We cannot do that,
120 though, since it does not work when using 32-bit definitions
121 on 64-bit platforms and vice versa. */
122 } a_un;
123 } Elf32_auxv_t;
124 #endif
125
126 #ifndef HAVE_ELF64_AUXV_T
127 /* Copied from glibc's elf.h. */
128 typedef struct
129 {
130 uint64_t a_type; /* Entry type */
131 union
132 {
133 uint64_t a_val; /* Integer value */
134 /* We used to have pointer elements added here. We cannot do that,
135 though, since it does not work when using 32-bit definitions
136 on 64-bit platforms and vice versa. */
137 } a_un;
138 } Elf64_auxv_t;
139 #endif
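
/* Illustrative sketch, not part of the original file: entries of the
   types above are what /proc/PID/auxv holds -- a flat array terminated
   by an AT_NULL entry.  For example, scanning a 64-bit auxv buffer for
   AT_ENTRY (both constants come from elf.h):

     static uint64_t
     auxv64_find_entry (const Elf64_auxv_t *auxv)
     {
       for (; auxv->a_type != AT_NULL; auxv++)
         if (auxv->a_type == AT_ENTRY)
           return auxv->a_un.a_val;
       return 0;
     }
*/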
140
141 /* LWP accessors. */
142
143 /* See nat/linux-nat.h. */
144
145 ptid_t
146 ptid_of_lwp (struct lwp_info *lwp)
147 {
148 return ptid_of (get_lwp_thread (lwp));
149 }
150
151 /* See nat/linux-nat.h. */
152
153 void
154 lwp_set_arch_private_info (struct lwp_info *lwp,
155 struct arch_lwp_info *info)
156 {
157 lwp->arch_private = info;
158 }
159
160 /* See nat/linux-nat.h. */
161
162 struct arch_lwp_info *
163 lwp_arch_private_info (struct lwp_info *lwp)
164 {
165 return lwp->arch_private;
166 }
167
168 /* See nat/linux-nat.h. */
169
170 int
171 lwp_is_stopped (struct lwp_info *lwp)
172 {
173 return lwp->stopped;
174 }
175
176 /* See nat/linux-nat.h. */
177
178 enum target_stop_reason
179 lwp_stop_reason (struct lwp_info *lwp)
180 {
181 return lwp->stop_reason;
182 }
183
184 /* A list of all unknown processes which receive stop signals. Some
185 other process will presumably claim each of these as forked
186 children momentarily. */
187
188 struct simple_pid_list
189 {
190 /* The process ID. */
191 int pid;
192
193 /* The status as reported by waitpid. */
194 int status;
195
196 /* Next in chain. */
197 struct simple_pid_list *next;
198 };
199 struct simple_pid_list *stopped_pids;
200
201 /* Trivial list manipulation functions to keep track of a list of new
202 stopped processes. */
203
204 static void
205 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
206 {
207 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
208
209 new_pid->pid = pid;
210 new_pid->status = status;
211 new_pid->next = *listp;
212 *listp = new_pid;
213 }
214
215 static int
216 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
217 {
218 struct simple_pid_list **p;
219
220 for (p = listp; *p != NULL; p = &(*p)->next)
221 if ((*p)->pid == pid)
222 {
223 struct simple_pid_list *next = (*p)->next;
224
225 *statusp = (*p)->status;
226 xfree (*p);
227 *p = next;
228 return 1;
229 }
230 return 0;
231 }
232
233 enum stopping_threads_kind
234 {
235 /* Not stopping threads presently. */
236 NOT_STOPPING_THREADS,
237
238 /* Stopping threads. */
239 STOPPING_THREADS,
240
241 /* Stopping and suspending threads. */
242 STOPPING_AND_SUSPENDING_THREADS
243 };
244
245 /* This is set while stop_all_lwps is in effect. */
246 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
247
248 /* FIXME make into a target method? */
249 int using_threads = 1;
250
251 /* True if we're presently stabilizing threads (moving them out of
252 jump pads). */
253 static int stabilizing_threads;
254
255 static void linux_resume_one_lwp (struct lwp_info *lwp,
256 int step, int signal, siginfo_t *info);
257 static void linux_resume (struct thread_resume *resume_info, size_t n);
258 static void stop_all_lwps (int suspend, struct lwp_info *except);
259 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
260 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
261 int *wstat, int options);
262 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
263 static struct lwp_info *add_lwp (ptid_t ptid);
264 static int linux_stopped_by_watchpoint (void);
265 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
266 static void proceed_all_lwps (void);
267 static int finish_step_over (struct lwp_info *lwp);
268 static int kill_lwp (unsigned long lwpid, int signo);
269
270 /* When the event-loop is doing a step-over, this points at the thread
271 being stepped. */
272 ptid_t step_over_bkpt;
273
274 /* True if the low target can hardware single-step. Such targets
275 don't need a BREAKPOINT_REINSERT_ADDR callback. */
276
277 static int
278 can_hardware_single_step (void)
279 {
280 return (the_low_target.breakpoint_reinsert_addr == NULL);
281 }
282
283 /* True if the low target supports memory breakpoints. If so, we'll
284 have a GET_PC implementation. */
285
286 static int
287 supports_breakpoints (void)
288 {
289 return (the_low_target.get_pc != NULL);
290 }
291
292 /* Returns true if this target can support fast tracepoints. This
293 does not mean that the in-process agent has been loaded in the
294 inferior. */
295
296 static int
297 supports_fast_tracepoints (void)
298 {
299 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
300 }
301
302 /* True if LWP is stopped in its stepping range. */
303
304 static int
305 lwp_in_step_range (struct lwp_info *lwp)
306 {
307 CORE_ADDR pc = lwp->stop_pc;
308
309 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
310 }
311
312 struct pending_signals
313 {
314 int signal;
315 siginfo_t info;
316 struct pending_signals *prev;
317 };
318
319 /* The read/write ends of the pipe registered as waitable file in the
320 event loop. */
321 static int linux_event_pipe[2] = { -1, -1 };
322
323 /* True if we're currently in async mode. */
324 #define target_is_async_p() (linux_event_pipe[0] != -1)
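
/* Illustrative sketch, and an assumption rather than code from this
   excerpt: the SIGCHLD handler wakes the event loop by writing a byte
   to linux_event_pipe[1], which the loop polls and later drains via
   linux_event_pipe[0].  A hypothetical marker would look like:

     static void
     wake_event_loop (void)
     {
       char c = '+';

       if (target_is_async_p ())
         while (write (linux_event_pipe[1], &c, 1) < 0 && errno == EINTR)
           ;
     }
*/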
325
326 static void send_sigstop (struct lwp_info *lwp);
327 static void wait_for_sigstop (void);
328
329 /* Return non-zero if HEADER is a 64-bit ELF file. */
330
331 static int
332 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
333 {
334 if (header->e_ident[EI_MAG0] == ELFMAG0
335 && header->e_ident[EI_MAG1] == ELFMAG1
336 && header->e_ident[EI_MAG2] == ELFMAG2
337 && header->e_ident[EI_MAG3] == ELFMAG3)
338 {
339 *machine = header->e_machine;
340 return header->e_ident[EI_CLASS] == ELFCLASS64;
341
342 }
343 *machine = EM_NONE;
344 return -1;
345 }
346
347 /* Return non-zero if FILE is a 64-bit ELF file,
348 zero if the file is not a 64-bit ELF file,
349 and -1 if the file is not accessible or doesn't exist. */
350
351 static int
352 elf_64_file_p (const char *file, unsigned int *machine)
353 {
354 Elf64_Ehdr header;
355 int fd;
356
357 fd = open (file, O_RDONLY);
358 if (fd < 0)
359 return -1;
360
361 if (read (fd, &header, sizeof (header)) != sizeof (header))
362 {
363 close (fd);
364 return 0;
365 }
366 close (fd);
367
368 return elf_64_header_p (&header, machine);
369 }
370
371 /* Accepts an integer PID; returns true if the executable that
372 process PID is running is a 64-bit ELF file. */
373
374 int
375 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
376 {
377 char file[PATH_MAX];
378
379 sprintf (file, "/proc/%d/exe", pid);
380 return elf_64_file_p (file, machine);
381 }
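
/* Illustrative usage, not part of the original file: a caller that
   must pick a word size for PID might do (EM_X86_64 is from elf.h):

     unsigned int machine;

     if (linux_pid_exe_is_elf_64_file (pid, &machine) == 1
         && machine == EM_X86_64)
       {
         ... treat the inferior as 64-bit x86 ...
       }
*/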
382
383 static void
384 delete_lwp (struct lwp_info *lwp)
385 {
386 struct thread_info *thr = get_lwp_thread (lwp);
387
388 if (debug_threads)
389 debug_printf ("deleting %ld\n", lwpid_of (thr));
390
391 remove_thread (thr);
392 free (lwp->arch_private);
393 free (lwp);
394 }
395
396 /* Add a process to the common process list, and set its private
397 data. */
398
399 static struct process_info *
400 linux_add_process (int pid, int attached)
401 {
402 struct process_info *proc;
403
404 proc = add_process (pid, attached);
405 proc->priv = xcalloc (1, sizeof (*proc->priv));
406
407 /* Set the arch when the first LWP stops. */
408 proc->priv->new_inferior = 1;
409
410 if (the_low_target.new_process != NULL)
411 proc->priv->arch_private = the_low_target.new_process ();
412
413 return proc;
414 }
415
416 static CORE_ADDR get_pc (struct lwp_info *lwp);
417
418 /* Handle a GNU/Linux extended wait response. If we see a clone
419 event, we need to add the new LWP to our list and return 1 so as
420 not to report the trap to higher layers (returning 0 reports it). */
421
422 static int
423 handle_extended_wait (struct lwp_info *event_lwp, int wstat)
424 {
425 int event = linux_ptrace_get_extended_event (wstat);
426 struct thread_info *event_thr = get_lwp_thread (event_lwp);
427 struct lwp_info *new_lwp;
428
429 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
430 || (event == PTRACE_EVENT_CLONE))
431 {
432 ptid_t ptid;
433 unsigned long new_pid;
434 int ret, status;
435
436 /* Get the pid of the new lwp. */
437 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
438 &new_pid);
439
440 /* If we haven't already seen the new PID stop, wait for it now. */
441 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
442 {
443 /* The new child has a pending SIGSTOP. We can't affect it until it
444 hits the SIGSTOP, but we're already attached. */
445
446 ret = my_waitpid (new_pid, &status, __WALL);
447
448 if (ret == -1)
449 perror_with_name ("waiting for new child");
450 else if (ret != new_pid)
451 warning ("wait returned unexpected PID %d", ret);
452 else if (!WIFSTOPPED (status))
453 warning ("wait returned unexpected status 0x%x", status);
454 }
455
456 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
457 {
458 struct process_info *parent_proc;
459 struct process_info *child_proc;
460 struct lwp_info *child_lwp;
461 struct thread_info *child_thr;
462 struct target_desc *tdesc;
463
464 ptid = ptid_build (new_pid, new_pid, 0);
465
466 if (debug_threads)
467 {
468 debug_printf ("HEW: Got fork event from LWP %ld, "
469 "new child is %d\n",
470 ptid_get_lwp (ptid_of (event_thr)),
471 ptid_get_pid (ptid));
472 }
473
474 /* Add the new process to the tables and clone the breakpoint
475 lists of the parent. We need to do this even if the new process
476 will be detached, since we will need the process object and the
477 breakpoints to remove any breakpoints from memory when we
478 detach, and the client side will access registers. */
479 child_proc = linux_add_process (new_pid, 0);
480 gdb_assert (child_proc != NULL);
481 child_lwp = add_lwp (ptid);
482 gdb_assert (child_lwp != NULL);
483 child_lwp->stopped = 1;
484 child_lwp->must_set_ptrace_flags = 1;
485 child_lwp->status_pending_p = 0;
486 child_thr = get_lwp_thread (child_lwp);
487 child_thr->last_resume_kind = resume_stop;
488 parent_proc = get_thread_process (event_thr);
489 child_proc->attached = parent_proc->attached;
490 clone_all_breakpoints (&child_proc->breakpoints,
491 &child_proc->raw_breakpoints,
492 parent_proc->breakpoints);
493
494 tdesc = xmalloc (sizeof (struct target_desc));
495 copy_target_description (tdesc, parent_proc->tdesc);
496 child_proc->tdesc = tdesc;
497
498 /* Clone arch-specific process data. */
499 if (the_low_target.new_fork != NULL)
500 the_low_target.new_fork (parent_proc, child_proc);
501
502 /* Save fork info in the parent thread. */
503 if (event == PTRACE_EVENT_FORK)
504 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
505 else if (event == PTRACE_EVENT_VFORK)
506 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
507
508 event_lwp->waitstatus.value.related_pid = ptid;
509
510 /* The status_pending field contains bits denoting the
511 extended event, so when the pending event is handled,
512 the handler will look at lwp->waitstatus. */
513 event_lwp->status_pending_p = 1;
514 event_lwp->status_pending = wstat;
515
516 /* Report the event. */
517 return 0;
518 }
519
520 if (debug_threads)
521 debug_printf ("HEW: Got clone event "
522 "from LWP %ld, new child is LWP %ld\n",
523 lwpid_of (event_thr), new_pid);
524
525 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
526 new_lwp = add_lwp (ptid);
527
528 /* Either we're going to immediately resume the new thread
529 or leave it stopped. linux_resume_one_lwp is a nop if it
530 thinks the thread is currently running, so set this first
531 before calling linux_resume_one_lwp. */
532 new_lwp->stopped = 1;
533
534 /* If we're suspending all threads, leave this one suspended
535 too. */
536 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
537 new_lwp->suspended = 1;
538
539 /* Normally we will get the pending SIGSTOP. But in some cases
540 we might get another signal delivered to the group first.
541 If we do get another signal, be sure not to lose it. */
542 if (WSTOPSIG (status) != SIGSTOP)
543 {
544 new_lwp->stop_expected = 1;
545 new_lwp->status_pending_p = 1;
546 new_lwp->status_pending = status;
547 }
548
549 /* Don't report the event. */
550 return 1;
551 }
552 else if (event == PTRACE_EVENT_VFORK_DONE)
553 {
554 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
555
556 /* Report the event. */
557 return 0;
558 }
559
560 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
561 }
562
563 /* Return the PC as read from the regcache of LWP, without any
564 adjustment. */
565
566 static CORE_ADDR
567 get_pc (struct lwp_info *lwp)
568 {
569 struct thread_info *saved_thread;
570 struct regcache *regcache;
571 CORE_ADDR pc;
572
573 if (the_low_target.get_pc == NULL)
574 return 0;
575
576 saved_thread = current_thread;
577 current_thread = get_lwp_thread (lwp);
578
579 regcache = get_thread_regcache (current_thread, 1);
580 pc = (*the_low_target.get_pc) (regcache);
581
582 if (debug_threads)
583 debug_printf ("pc is 0x%lx\n", (long) pc);
584
585 current_thread = saved_thread;
586 return pc;
587 }
588
589 /* This function should only be called if LWP got a SIGTRAP.
590 The SIGTRAP could mean several things.
591
592 On i386, where decr_pc_after_break is non-zero:
593
594 If we were single-stepping this process using PTRACE_SINGLESTEP, we
595 will get only the one SIGTRAP. The value of $eip will be the next
596 instruction. If the instruction we stepped over was a breakpoint,
597 we need to decrement the PC.
598
599 If we continue the process using PTRACE_CONT, we will get a
600 SIGTRAP when we hit a breakpoint. The value of $eip will be
601 the instruction after the breakpoint (i.e. needs to be
602 decremented). If we report the SIGTRAP to GDB, we must also
603 report the undecremented PC. If the breakpoint is removed, we
604 must resume at the decremented PC.
605
606 On a non-decr_pc_after_break machine with hardware or kernel
607 single-step:
608
609 If we either single-step a breakpoint instruction, or continue and
610 hit a breakpoint instruction, our PC will point at the breakpoint
611 instruction. */
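
/* Concrete example, not part of the original file: on i386 the
   software breakpoint is the one-byte int3 (0xcc) and
   decr_pc_after_break is 1.  With a breakpoint planted at 0x8048000,
   hitting it under PTRACE_CONT leaves $eip at 0x8048001, so the
   address to report to GDB and to resume from is

     sw_breakpoint_pc = pc - decr_pc_after_break
                      = 0x8048001 - 1 = 0x8048000.  */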
612
613 static int
614 check_stopped_by_breakpoint (struct lwp_info *lwp)
615 {
616 CORE_ADDR pc;
617 CORE_ADDR sw_breakpoint_pc;
618 struct thread_info *saved_thread;
619 #if USE_SIGTRAP_SIGINFO
620 siginfo_t siginfo;
621 #endif
622
623 if (the_low_target.get_pc == NULL)
624 return 0;
625
626 pc = get_pc (lwp);
627 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
628
629 /* breakpoint_at reads from the current thread. */
630 saved_thread = current_thread;
631 current_thread = get_lwp_thread (lwp);
632
633 #if USE_SIGTRAP_SIGINFO
634 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
635 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
636 {
637 if (siginfo.si_signo == SIGTRAP)
638 {
639 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
640 {
641 if (debug_threads)
642 {
643 struct thread_info *thr = get_lwp_thread (lwp);
644
645 debug_printf ("CSBB: %s stopped by software breakpoint\n",
646 target_pid_to_str (ptid_of (thr)));
647 }
648
649 /* Back up the PC if necessary. */
650 if (pc != sw_breakpoint_pc)
651 {
652 struct regcache *regcache
653 = get_thread_regcache (current_thread, 1);
654 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
655 }
656
657 lwp->stop_pc = sw_breakpoint_pc;
658 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
659 current_thread = saved_thread;
660 return 1;
661 }
662 else if (siginfo.si_code == TRAP_HWBKPT)
663 {
664 if (debug_threads)
665 {
666 struct thread_info *thr = get_lwp_thread (lwp);
667
668 debug_printf ("CSBB: %s stopped by hardware "
669 "breakpoint/watchpoint\n",
670 target_pid_to_str (ptid_of (thr)));
671 }
672
673 lwp->stop_pc = pc;
674 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
675 current_thread = saved_thread;
676 return 1;
677 }
678 else if (siginfo.si_code == TRAP_TRACE)
679 {
680 if (debug_threads)
681 {
682 struct thread_info *thr = get_lwp_thread (lwp);
683
684 debug_printf ("CSBB: %s stopped by trace\n",
685 target_pid_to_str (ptid_of (thr)));
686 }
687 }
688 }
689 }
690 #else
691 /* We may have just stepped a breakpoint instruction. E.g., in
692 non-stop mode, GDB first tells thread A to step a range, and
693 then the user inserts a breakpoint inside the range. In that
694 case we need to report the breakpoint PC. */
695 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
696 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
697 {
698 if (debug_threads)
699 {
700 struct thread_info *thr = get_lwp_thread (lwp);
701
702 debug_printf ("CSBB: %s stopped by software breakpoint\n",
703 target_pid_to_str (ptid_of (thr)));
704 }
705
706 /* Back up the PC if necessary. */
707 if (pc != sw_breakpoint_pc)
708 {
709 struct regcache *regcache
710 = get_thread_regcache (current_thread, 1);
711 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
712 }
713
714 lwp->stop_pc = sw_breakpoint_pc;
715 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
716 current_thread = saved_thread;
717 return 1;
718 }
719
720 if (hardware_breakpoint_inserted_here (pc))
721 {
722 if (debug_threads)
723 {
724 struct thread_info *thr = get_lwp_thread (lwp);
725
726 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
727 target_pid_to_str (ptid_of (thr)));
728 }
729
730 lwp->stop_pc = pc;
731 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
732 current_thread = saved_thread;
733 return 1;
734 }
735 #endif
736
737 current_thread = saved_thread;
738 return 0;
739 }
740
741 static struct lwp_info *
742 add_lwp (ptid_t ptid)
743 {
744 struct lwp_info *lwp;
745
746 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
747 memset (lwp, 0, sizeof (*lwp));
748
749 if (the_low_target.new_thread != NULL)
750 the_low_target.new_thread (lwp);
751
752 lwp->thread = add_thread (ptid, lwp);
753
754 return lwp;
755 }
756
757 /* Start an inferior process and return its pid.
758 ALLARGS is a vector of program-name and args. */
759
760 static int
761 linux_create_inferior (char *program, char **allargs)
762 {
763 struct lwp_info *new_lwp;
764 int pid;
765 ptid_t ptid;
766 struct cleanup *restore_personality
767 = maybe_disable_address_space_randomization (disable_randomization);
768
769 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
770 pid = vfork ();
771 #else
772 pid = fork ();
773 #endif
774 if (pid < 0)
775 perror_with_name ("fork");
776
777 if (pid == 0)
778 {
779 close_most_fds ();
780 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
781
782 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
783 signal (__SIGRTMIN + 1, SIG_DFL);
784 #endif
785
786 setpgid (0, 0);
787
788 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
789 stdout to stderr so that inferior i/o doesn't corrupt the connection.
790 Also, redirect stdin to /dev/null. */
791 if (remote_connection_is_stdio ())
792 {
793 close (0);
794 open ("/dev/null", O_RDONLY);
795 dup2 (2, 1);
796 if (write (2, "stdin/stdout redirected\n",
797 sizeof ("stdin/stdout redirected\n") - 1) < 0)
798 {
799 /* Errors ignored. */;
800 }
801 }
802
803 execv (program, allargs);
804 if (errno == ENOENT)
805 execvp (program, allargs);
806
807 fprintf (stderr, "Cannot exec %s: %s.\n", program,
808 strerror (errno));
809 fflush (stderr);
810 _exit (0177);
811 }
812
813 do_cleanups (restore_personality);
814
815 linux_add_process (pid, 0);
816
817 ptid = ptid_build (pid, pid, 0);
818 new_lwp = add_lwp (ptid);
819 new_lwp->must_set_ptrace_flags = 1;
820
821 return pid;
822 }
823
824 /* Attach to an inferior process. Returns 0 on success, ERRNO on
825 error. */
826
827 int
828 linux_attach_lwp (ptid_t ptid)
829 {
830 struct lwp_info *new_lwp;
831 int lwpid = ptid_get_lwp (ptid);
832
833 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
834 != 0)
835 return errno;
836
837 new_lwp = add_lwp (ptid);
838
839 /* We need to wait for SIGSTOP before being able to make the next
840 ptrace call on this LWP. */
841 new_lwp->must_set_ptrace_flags = 1;
842
843 if (linux_proc_pid_is_stopped (lwpid))
844 {
845 if (debug_threads)
846 debug_printf ("Attached to a stopped process\n");
847
848 /* The process is definitely stopped. It is in a job control
849 stop, unless the kernel predates the TASK_STOPPED /
850 TASK_TRACED distinction, in which case it might be in a
851 ptrace stop. Make sure it is in a ptrace stop; from there we
852 can kill it, signal it, et cetera.
853
854 First make sure there is a pending SIGSTOP. Since we are
855 already attached, the process can not transition from stopped
856 to running without a PTRACE_CONT; so we know this signal will
857 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
858 probably already in the queue (unless this kernel is old
859 enough to use TASK_STOPPED for ptrace stops); but since
860 SIGSTOP is not an RT signal, it can only be queued once. */
861 kill_lwp (lwpid, SIGSTOP);
862
863 /* Finally, resume the stopped process. This will deliver the
864 SIGSTOP (or a higher priority signal, just like normal
865 PTRACE_ATTACH), which we'll catch later on. */
866 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
867 }
868
869 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
870 brings it to a halt.
871
872 There are several cases to consider here:
873
874 1) gdbserver has already attached to the process and is being notified
875 of a new thread that is being created.
876 In this case we should ignore that SIGSTOP and resume the
877 process. This is handled below by setting stop_expected = 1,
878 and the fact that add_thread sets last_resume_kind ==
879 resume_continue.
880
881 2) This is the first thread (the process thread), and we're attaching
882 to it via attach_inferior.
883 In this case we want the process thread to stop.
884 This is handled by having linux_attach set last_resume_kind ==
885 resume_stop after we return.
886
887 If the pid we are attaching to is also the tgid, we attach to and
888 stop all the existing threads. Otherwise, we attach to pid and
889 ignore any other threads in the same group as this pid.
890
891 3) GDB is connecting to gdbserver and is requesting an enumeration of all
892 existing threads.
893 In this case we want the thread to stop.
894 FIXME: This case is currently not properly handled.
895 We should wait for the SIGSTOP but don't. Things work apparently
896 because enough time passes between when we ptrace (ATTACH) and when
897 gdb makes the next ptrace call on the thread.
898
899 On the other hand, if we are currently trying to stop all threads, we
900 should treat the new thread as if we had sent it a SIGSTOP. This works
901 because we are guaranteed that the add_lwp call above added us to the
902 end of the list, and so the new thread has not yet reached
903 wait_for_sigstop (but will). */
904 new_lwp->stop_expected = 1;
905
906 return 0;
907 }
908
909 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
910 already attached. Returns true if a new LWP is found, false
911 otherwise. */
912
913 static int
914 attach_proc_task_lwp_callback (ptid_t ptid)
915 {
916 /* Is this a new thread? */
917 if (find_thread_ptid (ptid) == NULL)
918 {
919 int lwpid = ptid_get_lwp (ptid);
920 int err;
921
922 if (debug_threads)
923 debug_printf ("Found new lwp %d\n", lwpid);
924
925 err = linux_attach_lwp (ptid);
926
927 /* Be quiet if we simply raced with the thread exiting. EPERM
928 is returned if the thread's task still exists, and is marked
929 as exited or zombie, as well as other conditions, so in that
930 case, confirm the status in /proc/PID/status. */
931 if (err == ESRCH
932 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
933 {
934 if (debug_threads)
935 {
936 debug_printf ("Cannot attach to lwp %d: "
937 "thread is gone (%d: %s)\n",
938 lwpid, err, strerror (err));
939 }
940 }
941 else if (err != 0)
942 {
943 warning (_("Cannot attach to lwp %d: %s"),
944 lwpid,
945 linux_ptrace_attach_fail_reason_string (ptid, err));
946 }
947
948 return 1;
949 }
950 return 0;
951 }
952
953 /* Attach to PID. If PID is the tgid, attach to it and all
954 of its threads. */
955
956 static int
957 linux_attach (unsigned long pid)
958 {
959 ptid_t ptid = ptid_build (pid, pid, 0);
960 int err;
961
962 /* Attach to PID. We will check for other threads
963 soon. */
964 err = linux_attach_lwp (ptid);
965 if (err != 0)
966 error ("Cannot attach to process %ld: %s",
967 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
968
969 linux_add_process (pid, 1);
970
971 if (!non_stop)
972 {
973 struct thread_info *thread;
974
975 /* Don't ignore the initial SIGSTOP if we just attached to this
976 process. It will be collected by wait shortly. */
977 thread = find_thread_ptid (ptid_build (pid, pid, 0));
978 thread->last_resume_kind = resume_stop;
979 }
980
981 /* We must attach to every LWP. If /proc is mounted, use that to
982 find them now. On the one hand, the inferior may be using raw
983 clone instead of using pthreads. On the other hand, even if it
984 is using pthreads, GDB may not be connected yet (thread_db needs
985 to do symbol lookups, through qSymbol). Also, thread_db walks
986 structures in the inferior's address space to find the list of
987 threads/LWPs, and those structures may well be corrupted. Note
988 that once thread_db is loaded, we'll still use it to list threads
989 and associate pthread info with each LWP. */
990 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
991 return 0;
992 }
993
994 struct counter
995 {
996 int pid;
997 int count;
998 };
999
1000 static int
1001 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1002 {
1003 struct counter *counter = args;
1004
1005 if (ptid_get_pid (entry->id) == counter->pid)
1006 {
1007 if (++counter->count > 1)
1008 return 1;
1009 }
1010
1011 return 0;
1012 }
1013
1014 static int
1015 last_thread_of_process_p (int pid)
1016 {
1017 struct counter counter = { pid , 0 };
1018
1019 return (find_inferior (&all_threads,
1020 second_thread_of_pid_p, &counter) == NULL);
1021 }
1022
1023 /* Kill LWP. */
1024
1025 static void
1026 linux_kill_one_lwp (struct lwp_info *lwp)
1027 {
1028 struct thread_info *thr = get_lwp_thread (lwp);
1029 int pid = lwpid_of (thr);
1030
1031 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1032 there is no signal context, and ptrace(PTRACE_KILL) (or
1033 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1034 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1035 alternative is to kill with SIGKILL. We only need one SIGKILL
1036 per process, not one for each thread. But since we still support
1037 linuxthreads, and we also support debugging programs using raw
1038 clone without CLONE_THREAD, we send one for each thread. For
1039 years, we used PTRACE_KILL only, so we're being a bit paranoid
1040 about some old kernels where PTRACE_KILL might work better
1041 (dubious if there are any such, but that's why it's paranoia), so
1042 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1043 everywhere. */
1044
1045 errno = 0;
1046 kill_lwp (pid, SIGKILL);
1047 if (debug_threads)
1048 {
1049 int save_errno = errno;
1050
1051 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1052 target_pid_to_str (ptid_of (thr)),
1053 save_errno ? strerror (save_errno) : "OK");
1054 }
1055
1056 errno = 0;
1057 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1058 if (debug_threads)
1059 {
1060 int save_errno = errno;
1061
1062 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1063 target_pid_to_str (ptid_of (thr)),
1064 save_errno ? strerror (save_errno) : "OK");
1065 }
1066 }
1067
1068 /* Kill LWP and wait for it to die. */
1069
1070 static void
1071 kill_wait_lwp (struct lwp_info *lwp)
1072 {
1073 struct thread_info *thr = get_lwp_thread (lwp);
1074 int pid = ptid_get_pid (ptid_of (thr));
1075 int lwpid = ptid_get_lwp (ptid_of (thr));
1076 int wstat;
1077 int res;
1078
1079 if (debug_threads)
1080 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1081
1082 do
1083 {
1084 linux_kill_one_lwp (lwp);
1085
1086 /* Make sure it died. Notes:
1087
1088 - The loop is most likely unnecessary.
1089
1090 - We don't use linux_wait_for_event as that could delete lwps
1091 while we're iterating over them. We're not interested in
1092 any pending status at this point, only in making sure all
1093 wait status on the kernel side are collected until the
1094 process is reaped.
1095
1096 - We don't use __WALL here as the __WALL emulation relies on
1097 SIGCHLD, and killing a stopped process doesn't generate
1098 one, nor an exit status.
1099 */
1100 res = my_waitpid (lwpid, &wstat, 0);
1101 if (res == -1 && errno == ECHILD)
1102 res = my_waitpid (lwpid, &wstat, __WCLONE);
1103 } while (res > 0 && WIFSTOPPED (wstat));
1104
1105 gdb_assert (res > 0);
1106 }
1107
1108 /* Callback for `find_inferior'. Kills an lwp of a given process,
1109 except the leader. */
1110
1111 static int
1112 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1113 {
1114 struct thread_info *thread = (struct thread_info *) entry;
1115 struct lwp_info *lwp = get_thread_lwp (thread);
1116 int pid = * (int *) args;
1117
1118 if (ptid_get_pid (entry->id) != pid)
1119 return 0;
1120
1121 /* We avoid killing the first thread here, because of a Linux kernel (at
1122 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1123 the children get a chance to be reaped, it will remain a zombie
1124 forever. */
1125
1126 if (lwpid_of (thread) == pid)
1127 {
1128 if (debug_threads)
1129 debug_printf ("lkop: is last of process %s\n",
1130 target_pid_to_str (entry->id));
1131 return 0;
1132 }
1133
1134 kill_wait_lwp (lwp);
1135 return 0;
1136 }
1137
1138 static int
1139 linux_kill (int pid)
1140 {
1141 struct process_info *process;
1142 struct lwp_info *lwp;
1143
1144 process = find_process_pid (pid);
1145 if (process == NULL)
1146 return -1;
1147
1148 /* If we're killing a running inferior, make sure it is stopped
1149 first, as PTRACE_KILL will not work otherwise. */
1150 stop_all_lwps (0, NULL);
1151
1152 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1153
1154 /* See the comment in kill_one_lwp_callback. We did not kill the first
1155 thread in the list, so do so now. */
1156 lwp = find_lwp_pid (pid_to_ptid (pid));
1157
1158 if (lwp == NULL)
1159 {
1160 if (debug_threads)
1161 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1162 pid);
1163 }
1164 else
1165 kill_wait_lwp (lwp);
1166
1167 the_target->mourn (process);
1168
1169 /* Since we presently can only stop all lwps of all processes, we
1170 need to unstop lwps of other processes. */
1171 unstop_all_lwps (0, NULL);
1172 return 0;
1173 }
1174
1175 /* Get pending signal of THREAD, for detaching purposes. This is the
1176 signal the thread last stopped for, which we need to deliver to the
1177 thread when detaching, otherwise, it'd be suppressed/lost. */
1178
1179 static int
1180 get_detach_signal (struct thread_info *thread)
1181 {
1182 enum gdb_signal signo = GDB_SIGNAL_0;
1183 int status;
1184 struct lwp_info *lp = get_thread_lwp (thread);
1185
1186 if (lp->status_pending_p)
1187 status = lp->status_pending;
1188 else
1189 {
1190 /* If the thread had been suspended by gdbserver, and it stopped
1191 cleanly, then it'll have stopped with SIGSTOP. But we don't
1192 want to deliver that SIGSTOP. */
1193 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1194 || thread->last_status.value.sig == GDB_SIGNAL_0)
1195 return 0;
1196
1197 /* Otherwise, we may need to deliver the signal we
1198 intercepted. */
1199 status = lp->last_status;
1200 }
1201
1202 if (!WIFSTOPPED (status))
1203 {
1204 if (debug_threads)
1205 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1206 target_pid_to_str (ptid_of (thread)));
1207 return 0;
1208 }
1209
1210 /* Extended wait statuses aren't real SIGTRAPs. */
1211 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1212 {
1213 if (debug_threads)
1214 debug_printf ("GPS: lwp %s had stopped with extended "
1215 "status: no pending signal\n",
1216 target_pid_to_str (ptid_of (thread)));
1217 return 0;
1218 }
1219
1220 signo = gdb_signal_from_host (WSTOPSIG (status));
1221
1222 if (program_signals_p && !program_signals[signo])
1223 {
1224 if (debug_threads)
1225 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1226 target_pid_to_str (ptid_of (thread)),
1227 gdb_signal_to_string (signo));
1228 return 0;
1229 }
1230 else if (!program_signals_p
1231 /* If we have no way to know which signals GDB does not
1232 want to have passed to the program, assume
1233 SIGTRAP/SIGINT, which is GDB's default. */
1234 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1235 {
1236 if (debug_threads)
1237 debug_printf ("GPS: lwp %s had signal %s, "
1238 "but we don't know if we should pass it. "
1239 "Default to not.\n",
1240 target_pid_to_str (ptid_of (thread)),
1241 gdb_signal_to_string (signo));
1242 return 0;
1243 }
1244 else
1245 {
1246 if (debug_threads)
1247 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1248 target_pid_to_str (ptid_of (thread)),
1249 gdb_signal_to_string (signo));
1250
1251 return WSTOPSIG (status);
1252 }
1253 }
1254
1255 static int
1256 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1257 {
1258 struct thread_info *thread = (struct thread_info *) entry;
1259 struct lwp_info *lwp = get_thread_lwp (thread);
1260 int pid = * (int *) args;
1261 int sig;
1262
1263 if (ptid_get_pid (entry->id) != pid)
1264 return 0;
1265
1266 /* If there is a pending SIGSTOP, get rid of it. */
1267 if (lwp->stop_expected)
1268 {
1269 if (debug_threads)
1270 debug_printf ("Sending SIGCONT to %s\n",
1271 target_pid_to_str (ptid_of (thread)));
1272
1273 kill_lwp (lwpid_of (thread), SIGCONT);
1274 lwp->stop_expected = 0;
1275 }
1276
1277 /* Flush any pending changes to the process's registers. */
1278 regcache_invalidate_thread (thread);
1279
1280 /* Pass on any pending signal for this thread. */
1281 sig = get_detach_signal (thread);
1282
1283 /* Finally, let it resume. */
1284 if (the_low_target.prepare_to_resume != NULL)
1285 the_low_target.prepare_to_resume (lwp);
1286 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1287 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1288 error (_("Can't detach %s: %s"),
1289 target_pid_to_str (ptid_of (thread)),
1290 strerror (errno));
1291
1292 delete_lwp (lwp);
1293 return 0;
1294 }
1295
1296 static int
1297 linux_detach (int pid)
1298 {
1299 struct process_info *process;
1300
1301 process = find_process_pid (pid);
1302 if (process == NULL)
1303 return -1;
1304
1305 /* Stop all threads before detaching. First, ptrace requires that
1306 the thread is stopped to successfully detach. Second, thread_db
1307 may need to uninstall thread event breakpoints from memory, which
1308 only works with a stopped process anyway. */
1309 stop_all_lwps (0, NULL);
1310
1311 #ifdef USE_THREAD_DB
1312 thread_db_detach (process);
1313 #endif
1314
1315 /* Stabilize threads (move out of jump pads). */
1316 stabilize_threads ();
1317
1318 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1319
1320 the_target->mourn (process);
1321
1322 /* Since we presently can only stop all lwps of all processes, we
1323 need to unstop lwps of other processes. */
1324 unstop_all_lwps (0, NULL);
1325 return 0;
1326 }
1327
1328 /* Remove all LWPs that belong to process PROC from the lwp list. */
1329
1330 static int
1331 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1332 {
1333 struct thread_info *thread = (struct thread_info *) entry;
1334 struct lwp_info *lwp = get_thread_lwp (thread);
1335 struct process_info *process = proc;
1336
1337 if (pid_of (thread) == pid_of (process))
1338 delete_lwp (lwp);
1339
1340 return 0;
1341 }
1342
1343 static void
1344 linux_mourn (struct process_info *process)
1345 {
1346 struct process_info_private *priv;
1347
1348 #ifdef USE_THREAD_DB
1349 thread_db_mourn (process);
1350 #endif
1351
1352 find_inferior (&all_threads, delete_lwp_callback, process);
1353
1354 /* Freeing all private data. */
1355 priv = process->priv;
1356 free (priv->arch_private);
1357 free (priv);
1358 process->priv = NULL;
1359
1360 remove_process (process);
1361 }
1362
1363 static void
1364 linux_join (int pid)
1365 {
1366 int status, ret;
1367
1368 do {
1369 ret = my_waitpid (pid, &status, 0);
1370 if (WIFEXITED (status) || WIFSIGNALED (status))
1371 break;
1372 } while (ret != -1 || errno != ECHILD);
1373 }
1374
1375 /* Return nonzero if the given thread is still alive. */
1376 static int
1377 linux_thread_alive (ptid_t ptid)
1378 {
1379 struct lwp_info *lwp = find_lwp_pid (ptid);
1380
1381 /* We assume we always know if a thread exits. If a whole process
1382 exited but we still haven't been able to report it to GDB, we'll
1383 hold on to the last lwp of the dead process. */
1384 if (lwp != NULL)
1385 return !lwp->dead;
1386 else
1387 return 0;
1388 }
1389
1390 /* Return 1 if this lwp still has an interesting status pending. If
1391 not (e.g., it had stopped for a breakpoint that is gone), return
1392 false. */
1393
1394 static int
1395 thread_still_has_status_pending_p (struct thread_info *thread)
1396 {
1397 struct lwp_info *lp = get_thread_lwp (thread);
1398
1399 if (!lp->status_pending_p)
1400 return 0;
1401
1402 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1403 report any status pending the LWP may have. */
1404 if (thread->last_resume_kind == resume_stop
1405 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1406 return 0;
1407
1408 if (thread->last_resume_kind != resume_stop
1409 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1410 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1411 {
1412 struct thread_info *saved_thread;
1413 CORE_ADDR pc;
1414 int discard = 0;
1415
1416 gdb_assert (lp->last_status != 0);
1417
1418 pc = get_pc (lp);
1419
1420 saved_thread = current_thread;
1421 current_thread = thread;
1422
1423 if (pc != lp->stop_pc)
1424 {
1425 if (debug_threads)
1426 debug_printf ("PC of %ld changed\n",
1427 lwpid_of (thread));
1428 discard = 1;
1429 }
1430
1431 #if !USE_SIGTRAP_SIGINFO
1432 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1433 && !(*the_low_target.breakpoint_at) (pc))
1434 {
1435 if (debug_threads)
1436 debug_printf ("previous SW breakpoint of %ld gone\n",
1437 lwpid_of (thread));
1438 discard = 1;
1439 }
1440 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1441 && !hardware_breakpoint_inserted_here (pc))
1442 {
1443 if (debug_threads)
1444 debug_printf ("previous HW breakpoint of %ld gone\n",
1445 lwpid_of (thread));
1446 discard = 1;
1447 }
1448 #endif
1449
1450 current_thread = saved_thread;
1451
1452 if (discard)
1453 {
1454 if (debug_threads)
1455 debug_printf ("discarding pending breakpoint status\n");
1456 lp->status_pending_p = 0;
1457 return 0;
1458 }
1459 }
1460
1461 return 1;
1462 }
1463
1464 /* Return 1 if this lwp has an interesting status pending. */
1465 static int
1466 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1467 {
1468 struct thread_info *thread = (struct thread_info *) entry;
1469 struct lwp_info *lp = get_thread_lwp (thread);
1470 ptid_t ptid = * (ptid_t *) arg;
1471
1472 /* Check if we're only interested in events from a specific process
1473 or a specific LWP. */
1474 if (!ptid_match (ptid_of (thread), ptid))
1475 return 0;
1476
1477 if (lp->status_pending_p
1478 && !thread_still_has_status_pending_p (thread))
1479 {
1480 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1481 return 0;
1482 }
1483
1484 return lp->status_pending_p;
1485 }
1486
1487 static int
1488 same_lwp (struct inferior_list_entry *entry, void *data)
1489 {
1490 ptid_t ptid = *(ptid_t *) data;
1491 int lwp;
1492
1493 if (ptid_get_lwp (ptid) != 0)
1494 lwp = ptid_get_lwp (ptid);
1495 else
1496 lwp = ptid_get_pid (ptid);
1497
1498 if (ptid_get_lwp (entry->id) == lwp)
1499 return 1;
1500
1501 return 0;
1502 }
1503
1504 struct lwp_info *
1505 find_lwp_pid (ptid_t ptid)
1506 {
1507 struct inferior_list_entry *thread
1508 = find_inferior (&all_threads, same_lwp, &ptid);
1509
1510 if (thread == NULL)
1511 return NULL;
1512
1513 return get_thread_lwp ((struct thread_info *) thread);
1514 }
1515
1516 /* Return the number of known LWPs in the tgid given by PID. */
1517
1518 static int
1519 num_lwps (int pid)
1520 {
1521 struct inferior_list_entry *inf, *tmp;
1522 int count = 0;
1523
1524 ALL_INFERIORS (&all_threads, inf, tmp)
1525 {
1526 if (ptid_get_pid (inf->id) == pid)
1527 count++;
1528 }
1529
1530 return count;
1531 }
1532
1533 /* The arguments passed to iterate_over_lwps. */
1534
1535 struct iterate_over_lwps_args
1536 {
1537 /* The FILTER argument passed to iterate_over_lwps. */
1538 ptid_t filter;
1539
1540 /* The CALLBACK argument passed to iterate_over_lwps. */
1541 iterate_over_lwps_ftype *callback;
1542
1543 /* The DATA argument passed to iterate_over_lwps. */
1544 void *data;
1545 };
1546
1547 /* Callback for find_inferior used by iterate_over_lwps to filter
1548 calls to the callback supplied to that function. Returning a
1549 nonzero value causes find_inferior to stop iterating and return
1550 the current inferior_list_entry. Returning zero indicates that
1551 find_inferior should continue iterating. */
1552
1553 static int
1554 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1555 {
1556 struct iterate_over_lwps_args *args
1557 = (struct iterate_over_lwps_args *) args_p;
1558
1559 if (ptid_match (entry->id, args->filter))
1560 {
1561 struct thread_info *thr = (struct thread_info *) entry;
1562 struct lwp_info *lwp = get_thread_lwp (thr);
1563
1564 return (*args->callback) (lwp, args->data);
1565 }
1566
1567 return 0;
1568 }
1569
1570 /* See nat/linux-nat.h. */
1571
1572 struct lwp_info *
1573 iterate_over_lwps (ptid_t filter,
1574 iterate_over_lwps_ftype callback,
1575 void *data)
1576 {
1577 struct iterate_over_lwps_args args = {filter, callback, data};
1578 struct inferior_list_entry *entry;
1579
1580 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1581 if (entry == NULL)
1582 return NULL;
1583
1584 return get_thread_lwp ((struct thread_info *) entry);
1585 }
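
/* Illustrative usage, not part of the original file: count the
   stopped LWPs matching PID.  A zero return from the callback keeps
   the iteration going; a nonzero return would stop it and make
   iterate_over_lwps return that LWP.

     static int
     count_stopped_callback (struct lwp_info *lwp, void *data)
     {
       if (lwp->stopped)
         (*(int *) data)++;
       return 0;
     }

     ...
     int n_stopped = 0;

     iterate_over_lwps (pid_to_ptid (pid), count_stopped_callback,
                        &n_stopped);
*/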
1586
1587 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1588 their exits until all other threads in the group have exited. */
1589
1590 static void
1591 check_zombie_leaders (void)
1592 {
1593 struct process_info *proc, *tmp;
1594
1595 ALL_PROCESSES (proc, tmp)
1596 {
1597 pid_t leader_pid = pid_of (proc);
1598 struct lwp_info *leader_lp;
1599
1600 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1601
1602 if (debug_threads)
1603 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1604 "num_lwps=%d, zombie=%d\n",
1605 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1606 linux_proc_pid_is_zombie (leader_pid));
1607
1608 if (leader_lp != NULL
1609 /* Check if there are other threads in the group, as we may
1610 have raced with the inferior simply exiting. */
1611 && !last_thread_of_process_p (leader_pid)
1612 && linux_proc_pid_is_zombie (leader_pid))
1613 {
1614 /* A leader zombie can mean one of two things:
1615
1616 - It exited, and there's an exit status pending
1617 available, or only the leader exited (not the whole
1618 program). In the latter case, we can't waitpid the
1619 leader's exit status until all other threads are gone.
1620
1621 - There are 3 or more threads in the group, and a thread
1622 other than the leader exec'd. On an exec, the Linux
1623 kernel destroys all other threads (except the execing
1624 one) in the thread group, and resets the execing thread's
1625 tid to the tgid. No exit notification is sent for the
1626 execing thread -- from the ptracer's perspective, it
1627 appears as though the execing thread just vanishes.
1628 Until we reap all other threads except the leader and the
1629 execing thread, the leader will be zombie, and the
1630 execing thread will be in `D (disc sleep)'. As soon as
1631 all other threads are reaped, the execing thread changes
1632 its tid to the tgid, and the previous (zombie) leader
1633 vanishes, giving place to the "new" leader. We could try
1634 distinguishing the exit and exec cases, by waiting once
1635 more, and seeing if something comes out, but it doesn't
1636 sound useful. The previous leader _does_ go away, and
1637 we'll re-add the new one once we see the exec event
1638 (which is just the same as what would happen if the
1639 previous leader did exit voluntarily before some other
1640 thread execs). */
1641
1642 if (debug_threads)
1643 fprintf (stderr,
1644 "CZL: Thread group leader %d zombie "
1645 "(it exited, or another thread execd).\n",
1646 leader_pid);
1647
1648 delete_lwp (leader_lp);
1649 }
1650 }
1651 }
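
/* Illustrative sketch, and an assumption about a helper defined
   elsewhere: linux_proc_pid_is_zombie (from nat/linux-procfs.c, not
   this file) amounts to checking whether the "State:" line of
   /proc/PID/status reports 'Z', roughly:

     static int
     pid_is_zombie_sketch (pid_t pid)
     {
       char path[64], line[256];
       FILE *f;
       int zombie = 0;

       snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
       f = fopen (path, "r");
       if (f == NULL)
         return 0;
       while (fgets (line, sizeof (line), f) != NULL)
         if (strncmp (line, "State:", 6) == 0)
           {
             zombie = (strchr (line + 6, 'Z') != NULL);
             break;
           }
       fclose (f);
       return zombie;
     }
*/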
1652
1653 /* Callback for `find_inferior'. Returns the first LWP that is not
1654 stopped. ARG is a PTID filter. */
1655
1656 static int
1657 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1658 {
1659 struct thread_info *thr = (struct thread_info *) entry;
1660 struct lwp_info *lwp;
1661 ptid_t filter = *(ptid_t *) arg;
1662
1663 if (!ptid_match (ptid_of (thr), filter))
1664 return 0;
1665
1666 lwp = get_thread_lwp (thr);
1667 if (!lwp->stopped)
1668 return 1;
1669
1670 return 0;
1671 }
1672
1673 /* This function should only be called if the LWP got a SIGTRAP.
1674
1675 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1676 event was handled, 0 otherwise. */
1677
1678 static int
1679 handle_tracepoints (struct lwp_info *lwp)
1680 {
1681 struct thread_info *tinfo = get_lwp_thread (lwp);
1682 int tpoint_related_event = 0;
1683
1684 gdb_assert (lwp->suspended == 0);
1685
1686 /* If this tracepoint hit causes a tracing stop, we'll immediately
1687 uninsert tracepoints. To do this, we temporarily pause all
1688 threads, unpatch away, and then unpause threads. We need to make
1689 sure the unpausing doesn't resume LWP too. */
1690 lwp->suspended++;
1691
1692 /* And we need to be sure that any all-threads-stopping doesn't try
1693 to move threads out of the jump pads, as it could deadlock the
1694 inferior (LWP could be in the jump pad, maybe even holding the
1695 lock.) */
1696
1697 /* Do any necessary step collect actions. */
1698 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1699
1700 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1701
1702 /* See if we just hit a tracepoint and do its main collect
1703 actions. */
1704 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1705
1706 lwp->suspended--;
1707
1708 gdb_assert (lwp->suspended == 0);
1709 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1710
1711 if (tpoint_related_event)
1712 {
1713 if (debug_threads)
1714 debug_printf ("got a tracepoint event\n");
1715 return 1;
1716 }
1717
1718 return 0;
1719 }
1720
1721 /* Convenience wrapper. Returns true if LWP is presently collecting a
1722 fast tracepoint. */
1723
1724 static int
1725 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1726 struct fast_tpoint_collect_status *status)
1727 {
1728 CORE_ADDR thread_area;
1729 struct thread_info *thread = get_lwp_thread (lwp);
1730
1731 if (the_low_target.get_thread_area == NULL)
1732 return 0;
1733
1734 /* Get the thread area address. This is used to recognize which
1735 thread is which when tracing with the in-process agent library.
1736 We don't read anything from the address, and treat it as opaque;
1737 it's the address itself that we assume is unique per-thread. */
1738 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1739 return 0;
1740
1741 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1742 }
1743
1744 /* The reason we resume in the caller is that we want to be able
1745 to pass lwp->status_pending as WSTAT, and we need to clear
1746 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1747 refuses to resume. */
1748
1749 static int
1750 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1751 {
1752 struct thread_info *saved_thread;
1753
1754 saved_thread = current_thread;
1755 current_thread = get_lwp_thread (lwp);
1756
1757 if ((wstat == NULL
1758 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1759 && supports_fast_tracepoints ()
1760 && agent_loaded_p ())
1761 {
1762 struct fast_tpoint_collect_status status;
1763 int r;
1764
1765 if (debug_threads)
1766 debug_printf ("Checking whether LWP %ld needs to move out of the "
1767 "jump pad.\n",
1768 lwpid_of (current_thread));
1769
1770 r = linux_fast_tracepoint_collecting (lwp, &status);
1771
1772 if (wstat == NULL
1773 || (WSTOPSIG (*wstat) != SIGILL
1774 && WSTOPSIG (*wstat) != SIGFPE
1775 && WSTOPSIG (*wstat) != SIGSEGV
1776 && WSTOPSIG (*wstat) != SIGBUS))
1777 {
1778 lwp->collecting_fast_tracepoint = r;
1779
1780 if (r != 0)
1781 {
1782 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1783 {
1784 /* Haven't executed the original instruction yet.
1785 Set breakpoint there, and wait till it's hit,
1786 then single-step until exiting the jump pad. */
1787 lwp->exit_jump_pad_bkpt
1788 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1789 }
1790
1791 if (debug_threads)
1792 debug_printf ("Checking whether LWP %ld needs to move out of "
1793 "the jump pad...it does\n",
1794 lwpid_of (current_thread));
1795 current_thread = saved_thread;
1796
1797 return 1;
1798 }
1799 }
1800 else
1801 {
1802 /* If we get a synchronous signal while collecting, *and*
1803 while executing the (relocated) original instruction,
1804 reset the PC to point at the tpoint address, before
1805 reporting to GDB. Otherwise, it's an IPA lib bug: just
1806 report the signal to GDB, and pray for the best. */
1807
1808 lwp->collecting_fast_tracepoint = 0;
1809
1810 if (r != 0
1811 && (status.adjusted_insn_addr <= lwp->stop_pc
1812 && lwp->stop_pc < status.adjusted_insn_addr_end))
1813 {
1814 siginfo_t info;
1815 struct regcache *regcache;
1816
1817 /* The si_addr on a few signals references the address
1818 of the faulting instruction. Adjust that as
1819 well. */
1820 if ((WSTOPSIG (*wstat) == SIGILL
1821 || WSTOPSIG (*wstat) == SIGFPE
1822 || WSTOPSIG (*wstat) == SIGBUS
1823 || WSTOPSIG (*wstat) == SIGSEGV)
1824 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1825 (PTRACE_TYPE_ARG3) 0, &info) == 0
1826 /* Final check just to make sure we don't clobber
1827 the siginfo of non-kernel-sent signals. */
1828 && (uintptr_t) info.si_addr == lwp->stop_pc)
1829 {
1830 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1831 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1832 (PTRACE_TYPE_ARG3) 0, &info);
1833 }
1834
1835 regcache = get_thread_regcache (current_thread, 1);
1836 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1837 lwp->stop_pc = status.tpoint_addr;
1838
1839 /* Cancel any fast tracepoint lock this thread was
1840 holding. */
1841 force_unlock_trace_buffer ();
1842 }
1843
1844 if (lwp->exit_jump_pad_bkpt != NULL)
1845 {
1846 if (debug_threads)
1847 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1848 "stopping all threads momentarily.\n");
1849
1850 stop_all_lwps (1, lwp);
1851
1852 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1853 lwp->exit_jump_pad_bkpt = NULL;
1854
1855 unstop_all_lwps (1, lwp);
1856
1857 gdb_assert (lwp->suspended >= 0);
1858 }
1859 }
1860 }
1861
1862 if (debug_threads)
1863 debug_printf ("Checking whether LWP %ld needs to move out of the "
1864 "jump pad...no\n",
1865 lwpid_of (current_thread));
1866
1867 current_thread = saved_thread;
1868 return 0;
1869 }
1870
1871 /* Enqueue one signal in the "signals to report later when out of the
1872 jump pad" list. */
1873
1874 static void
1875 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1876 {
1877 struct pending_signals *p_sig;
1878 struct thread_info *thread = get_lwp_thread (lwp);
1879
1880 if (debug_threads)
1881 debug_printf ("Deferring signal %d for LWP %ld.\n",
1882 WSTOPSIG (*wstat), lwpid_of (thread));
1883
1884 if (debug_threads)
1885 {
1886 struct pending_signals *sig;
1887
1888 for (sig = lwp->pending_signals_to_report;
1889 sig != NULL;
1890 sig = sig->prev)
1891 debug_printf (" Already queued %d\n",
1892 sig->signal);
1893
1894 debug_printf (" (no more currently queued signals)\n");
1895 }
1896
1897 /* Don't enqueue non-RT signals if they are already in the deferred
1898 queue. (SIGSTOP being the easiest signal to see ending up here
1899 twice) */
1900 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1901 {
1902 struct pending_signals *sig;
1903
1904 for (sig = lwp->pending_signals_to_report;
1905 sig != NULL;
1906 sig = sig->prev)
1907 {
1908 if (sig->signal == WSTOPSIG (*wstat))
1909 {
1910 if (debug_threads)
1911 debug_printf ("Not requeuing already queued non-RT signal %d"
1912 " for LWP %ld\n",
1913 sig->signal,
1914 lwpid_of (thread));
1915 return;
1916 }
1917 }
1918 }
1919
1920 p_sig = xmalloc (sizeof (*p_sig));
1921 p_sig->prev = lwp->pending_signals_to_report;
1922 p_sig->signal = WSTOPSIG (*wstat);
1923 memset (&p_sig->info, 0, sizeof (siginfo_t));
1924 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1925 &p_sig->info);
1926
1927 lwp->pending_signals_to_report = p_sig;
1928 }
1929
1930 /* Dequeue one signal from the "signals to report later when out of
1931 the jump pad" list. */
1932
1933 static int
1934 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1935 {
1936 struct thread_info *thread = get_lwp_thread (lwp);
1937
1938 if (lwp->pending_signals_to_report != NULL)
1939 {
1940 struct pending_signals **p_sig;
1941
1942 p_sig = &lwp->pending_signals_to_report;
1943 while ((*p_sig)->prev != NULL)
1944 p_sig = &(*p_sig)->prev;
1945
1946 *wstat = W_STOPCODE ((*p_sig)->signal);
1947 if ((*p_sig)->info.si_signo != 0)
1948 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1949 &(*p_sig)->info);
1950 free (*p_sig);
1951 *p_sig = NULL;
1952
1953 if (debug_threads)
1954 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1955 WSTOPSIG (*wstat), lwpid_of (thread));
1956
1957 if (debug_threads)
1958 {
1959 struct pending_signals *sig;
1960
1961 for (sig = lwp->pending_signals_to_report;
1962 sig != NULL;
1963 sig = sig->prev)
1964 debug_printf (" Still queued %d\n",
1965 sig->signal);
1966
1967 debug_printf (" (no more queued signals)\n");
1968 }
1969
1970 return 1;
1971 }
1972
1973 return 0;
1974 }
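
/* Illustrative sketch (not compiled): the deferred-signal queue above
   is pushed at the head, with each entry's PREV pointing at the
   previously queued (older) entry, and dequeuing walks to the end of
   the chain, so signals are reported in arrival (FIFO) order.
   W_STOPCODE then rebuilds a wait status as if the thread had just
   stopped with the dequeued signal:  */
#if 0
int wstat = W_STOPCODE (SIGUSR1);

/* Given the W_STOPCODE definition, WIFSTOPPED (wstat) is true and
   WSTOPSIG (wstat) == SIGUSR1, matching what waitpid would have
   returned for a real SIGUSR1 stop.  */
#endif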
1975
1976 /* Fetch the possibly triggered data watchpoint info and store it in
1977 CHILD.
1978
1979 On some archs, like x86, that use debug registers to set
1980 watchpoints, it's possible that the way to know which watched
1981 address trapped is to check the register that is used to select
1982 which address to watch. Problem is, between setting the watchpoint
1983 and reading back which data address trapped, the user may change
1984 the set of watchpoints, and, as a consequence, GDB changes the
1985 debug registers in the inferior. To avoid reading back a stale
1986 stopped-data-address when that happens, we cache in CHILD the fact
1987 that a watchpoint trapped, and the corresponding data address, as
1988 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1989 registers meanwhile, we have the cached data we can rely on. */
1990
1991 static int
1992 check_stopped_by_watchpoint (struct lwp_info *child)
1993 {
1994 if (the_low_target.stopped_by_watchpoint != NULL)
1995 {
1996 struct thread_info *saved_thread;
1997
1998 saved_thread = current_thread;
1999 current_thread = get_lwp_thread (child);
2000
2001 if (the_low_target.stopped_by_watchpoint ())
2002 {
2003 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2004
2005 if (the_low_target.stopped_data_address != NULL)
2006 child->stopped_data_address
2007 = the_low_target.stopped_data_address ();
2008 else
2009 child->stopped_data_address = 0;
2010 }
2011
2012 current_thread = saved_thread;
2013 }
2014
2015 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2016 }
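
/* Illustrative sketch (not compiled; the two hypothetical_* helpers
   are assumptions standing in for arch-specific debug-register reads,
   not this file's API).  The caching described above amounts to
   snapshotting the watchpoint status at SIGTRAP time, before GDB has
   a chance to rewrite the debug registers:  */
#if 0
static void
cache_watchpoint_hit_sketch (struct lwp_info *child)
{
  /* Query the hardware while it still reflects the trap that just
     fired...  */
  if (hypothetical_hw_watchpoint_fired ())
    {
      /* ...and stash the answer in the LWP, so later requests don't
	 re-read possibly-stale debug registers.  */
      child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
      child->stopped_data_address = hypothetical_hw_trapped_address ();
    }
}
#endif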
2017
2018 /* Return the ptrace options that we want to try to enable. */
2019
2020 static int
2021 linux_low_ptrace_options (int attached)
2022 {
2023 int options = 0;
2024
2025 if (!attached)
2026 options |= PTRACE_O_EXITKILL;
2027
2028 if (report_fork_events)
2029 options |= PTRACE_O_TRACEFORK;
2030
2031 if (report_vfork_events)
2032 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2033
2034 return options;
2035 }
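
/* Illustrative sketch (not compiled): the options computed above are
   ultimately installed with PTRACE_SETOPTIONS on a ptrace-stopped
   tracee, roughly what linux_enable_event_reporting does on this
   function's behalf (error handling omitted):  */
#if 0
static void
set_ptrace_options_sketch (int pid, int attached)
{
  int options = linux_low_ptrace_options (attached);

  ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) (uintptr_t) options);
}
#endif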
2036
2037 /* Do low-level handling of the event, and check whether we should
2038 go on and pass it to caller code. Return the affected lwp if we
2039 should, or NULL otherwise. */
2040
2041 static struct lwp_info *
2042 linux_low_filter_event (int lwpid, int wstat)
2043 {
2044 struct lwp_info *child;
2045 struct thread_info *thread;
2046 int have_stop_pc = 0;
2047
2048 child = find_lwp_pid (pid_to_ptid (lwpid));
2049
2050 /* If we didn't find a process, one of two things presumably happened:
2051 - A process we started and then detached from has exited. Ignore it.
2052 - A process we are controlling has forked and the new child's stop
2053 was reported to us by the kernel. Save its PID. */
2054 if (child == NULL && WIFSTOPPED (wstat))
2055 {
2056 add_to_pid_list (&stopped_pids, lwpid, wstat);
2057 return NULL;
2058 }
2059 else if (child == NULL)
2060 return NULL;
2061
2062 thread = get_lwp_thread (child);
2063
2064 child->stopped = 1;
2065
2066 child->last_status = wstat;
2067
2068 /* Check if the thread has exited. */
2069 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2070 {
2071 if (debug_threads)
2072 debug_printf ("LLFE: %d exited.\n", lwpid);
2073 if (num_lwps (pid_of (thread)) > 1)
2074 {
2076 /* If there is at least one more LWP, then the exit signal was
2077 not the end of the debugged application and should be
2078 ignored. */
2079 delete_lwp (child);
2080 return NULL;
2081 }
2082 else
2083 {
2084 /* This was the last lwp in the process. Since events are
2085 serialized to GDB core, we can't report this one right
2086 now; but GDB core and the other target layers will want
2087 to be notified about the exit code/signal, so leave the
2088 status pending for the next time we're able to report
2089 it. */
2090 mark_lwp_dead (child, wstat);
2091 return child;
2092 }
2093 }
2094
2095 gdb_assert (WIFSTOPPED (wstat));
2096
2097 if (WIFSTOPPED (wstat))
2098 {
2099 struct process_info *proc;
2100
2101 /* Architecture-specific setup after inferior is running. This
2102 needs to happen after we have attached to the inferior and it
2103 is stopped for the first time, but before we access any
2104 inferior registers. */
2105 proc = find_process_pid (pid_of (thread));
2106 if (proc->priv->new_inferior)
2107 {
2108 struct thread_info *saved_thread;
2109
2110 saved_thread = current_thread;
2111 current_thread = thread;
2112
2113 the_low_target.arch_setup ();
2114
2115 current_thread = saved_thread;
2116
2117 proc->priv->new_inferior = 0;
2118 }
2119 }
2120
2121 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2122 {
2123 struct process_info *proc = find_process_pid (pid_of (thread));
2124 int options = linux_low_ptrace_options (proc->attached);
2125
2126 linux_enable_event_reporting (lwpid, options);
2127 child->must_set_ptrace_flags = 0;
2128 }
2129
2130 /* Be careful to not overwrite stop_pc until
2131 check_stopped_by_breakpoint is called. */
2132 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2133 && linux_is_extended_waitstatus (wstat))
2134 {
2135 child->stop_pc = get_pc (child);
2136 if (handle_extended_wait (child, wstat))
2137 {
2138 /* The event has been handled, so just return without
2139 reporting it. */
2140 return NULL;
2141 }
2142 }
2143
2144 /* Check first whether this was a SW/HW breakpoint before checking
2145 watchpoints, because at least s390 can't tell the data address of
2146 hardware watchpoint hits, and returns stopped-by-watchpoint as
2147 long as there's a watchpoint set. */
2148 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2149 {
2150 if (check_stopped_by_breakpoint (child))
2151 have_stop_pc = 1;
2152 }
2153
2154 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2155 or hardware watchpoint. Check which is which if we got
2156 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2157 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2158 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2159 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2160 check_stopped_by_watchpoint (child);
2161
2162 if (!have_stop_pc)
2163 child->stop_pc = get_pc (child);
2164
2165 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2166 && child->stop_expected)
2167 {
2168 if (debug_threads)
2169 debug_printf ("Expected stop.\n");
2170 child->stop_expected = 0;
2171
2172 if (thread->last_resume_kind == resume_stop)
2173 {
2174 /* We want to report the stop to the core. Treat the
2175 SIGSTOP as a normal event. */
2176 if (debug_threads)
2177 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2178 target_pid_to_str (ptid_of (thread)));
2179 }
2180 else if (stopping_threads != NOT_STOPPING_THREADS)
2181 {
2182 /* Stopping threads. We don't want this SIGSTOP to end up
2183 pending. */
2184 if (debug_threads)
2185 debug_printf ("LLW: SIGSTOP caught for %s "
2186 "while stopping threads.\n",
2187 target_pid_to_str (ptid_of (thread)));
2188 return NULL;
2189 }
2190 else
2191 {
2192 /* This is a delayed SIGSTOP. Filter out the event. */
2193 if (debug_threads)
2194 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2195 child->stepping ? "step" : "continue",
2196 target_pid_to_str (ptid_of (thread)));
2197
2198 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2199 return NULL;
2200 }
2201 }
2202
2203 child->status_pending_p = 1;
2204 child->status_pending = wstat;
2205 return child;
2206 }
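
/* Note (an illustrative assumption about nat/linux-ptrace.h, not
   verified here): an "extended" ptrace wait status, as tested by
   linux_is_extended_waitstatus above, is a SIGTRAP stop carrying a
   PTRACE_EVENT_* code in bits 16 and up:  */
#if 0
int event = wstat >> 16;	/* e.g. PTRACE_EVENT_FORK */
int is_extended = (WIFSTOPPED (wstat)
		   && WSTOPSIG (wstat) == SIGTRAP
		   && event != 0);
#endif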
2207
2208 /* Resume LWPs that are currently stopped without any pending status
2209 to report, but are resumed from the core's perspective. */
2210
2211 static void
2212 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2213 {
2214 struct thread_info *thread = (struct thread_info *) entry;
2215 struct lwp_info *lp = get_thread_lwp (thread);
2216
2217 if (lp->stopped
2218 && !lp->status_pending_p
2219 && thread->last_resume_kind != resume_stop
2220 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2221 {
2222 int step = thread->last_resume_kind == resume_step;
2223
2224 if (debug_threads)
2225 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2226 target_pid_to_str (ptid_of (thread)),
2227 paddress (lp->stop_pc),
2228 step);
2229
2230 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2231 }
2232 }
2233
2234 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2235 match FILTER_PTID (leaving others pending). The PTIDs can be:
2236 minus_one_ptid, to specify any child; a pid PTID, specifying all
2237 lwps of a thread group; or a PTID representing a single lwp. Store
2238 the stop status through the status pointer WSTATP. OPTIONS is
2239 passed to the waitpid call. Return 0 if no event was found and
2240 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2241 were found. Return the PID of the stopped child otherwise. */
2242
2243 static int
2244 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2245 int *wstatp, int options)
2246 {
2247 struct thread_info *event_thread;
2248 struct lwp_info *event_child, *requested_child;
2249 sigset_t block_mask, prev_mask;
2250
2251 retry:
2252 /* N.B. event_thread points to the thread_info struct that contains
2253 event_child. Keep them in sync. */
2254 event_thread = NULL;
2255 event_child = NULL;
2256 requested_child = NULL;
2257
2258 /* Check for a lwp with a pending status. */
2259
2260 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2261 {
2262 event_thread = (struct thread_info *)
2263 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2264 if (event_thread != NULL)
2265 event_child = get_thread_lwp (event_thread);
2266 if (debug_threads && event_thread)
2267 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2268 }
2269 else if (!ptid_equal (filter_ptid, null_ptid))
2270 {
2271 requested_child = find_lwp_pid (filter_ptid);
2272
2273 if (stopping_threads == NOT_STOPPING_THREADS
2274 && requested_child->status_pending_p
2275 && requested_child->collecting_fast_tracepoint)
2276 {
2277 enqueue_one_deferred_signal (requested_child,
2278 &requested_child->status_pending);
2279 requested_child->status_pending_p = 0;
2280 requested_child->status_pending = 0;
2281 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2282 }
2283
2284 if (requested_child->suspended
2285 && requested_child->status_pending_p)
2286 {
2287 internal_error (__FILE__, __LINE__,
2288 "requesting an event out of a"
2289 " suspended child?");
2290 }
2291
2292 if (requested_child->status_pending_p)
2293 {
2294 event_child = requested_child;
2295 event_thread = get_lwp_thread (event_child);
2296 }
2297 }
2298
2299 if (event_child != NULL)
2300 {
2301 if (debug_threads)
2302 debug_printf ("Got an event from pending child %ld (%04x)\n",
2303 lwpid_of (event_thread), event_child->status_pending);
2304 *wstatp = event_child->status_pending;
2305 event_child->status_pending_p = 0;
2306 event_child->status_pending = 0;
2307 current_thread = event_thread;
2308 return lwpid_of (event_thread);
2309 }
2310
2311 /* But if we don't find a pending event, we'll have to wait.
2312
2313 We only enter this loop if no process has a pending wait status.
2314 Thus any action taken in response to a wait status inside this
2315 loop is responding as soon as we detect the status, not after any
2316 pending events. */
2317
2318 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2319 all signals while here. */
2320 sigfillset (&block_mask);
2321 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2322
2323 /* Always pull all events out of the kernel. We'll randomly select
2324 an event LWP out of all that have events, to prevent
2325 starvation. */
2326 while (event_child == NULL)
2327 {
2328 pid_t ret = 0;
2329
2330 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2331 quirks:
2332
2333 - If the thread group leader exits while other threads in the
2334 thread group still exist, waitpid(TGID, ...) hangs. That
2335 waitpid won't return an exit status until the other threads
2336 in the group are reaped.
2337
2338 - When a non-leader thread execs, that thread just vanishes
2339 without reporting an exit (so we'd hang if we waited for it
2340 explicitly in that case). The exec event is reported to
2341 the TGID pid (although we don't currently enable exec
2342 events). */
2343 errno = 0;
2344 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2345
2346 if (debug_threads)
2347 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2348 ret, errno ? strerror (errno) : "ERRNO-OK");
2349
2350 if (ret > 0)
2351 {
2352 if (debug_threads)
2353 {
2354 debug_printf ("LLW: waitpid %ld received %s\n",
2355 (long) ret, status_to_str (*wstatp));
2356 }
2357
2358 /* Filter all events. IOW, leave all events pending. We'll
2359 randomly select an event LWP out of all that have events
2360 below. */
2361 linux_low_filter_event (ret, *wstatp);
2362 /* Retry until nothing comes out of waitpid. A single
2363 SIGCHLD can indicate more than one child stopped. */
2364 continue;
2365 }
2366
2367 /* Now that we've pulled all events out of the kernel, resume
2368 LWPs that don't have an interesting event to report. */
2369 if (stopping_threads == NOT_STOPPING_THREADS)
2370 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2371
2372 /* ... and find an LWP with a status to report to the core, if
2373 any. */
2374 event_thread = (struct thread_info *)
2375 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2376 if (event_thread != NULL)
2377 {
2378 event_child = get_thread_lwp (event_thread);
2379 *wstatp = event_child->status_pending;
2380 event_child->status_pending_p = 0;
2381 event_child->status_pending = 0;
2382 break;
2383 }
2384
2385 /* Check for zombie thread group leaders. Those can't be reaped
2386 until all other threads in the thread group are. */
2387 check_zombie_leaders ();
2388
2389 /* If there are no resumed children left in the set of LWPs we
2390 want to wait for, bail. We can't just block in
2391 waitpid/sigsuspend, because lwps might have been left stopped
2392 in trace-stop state, and we'd be stuck forever waiting for
2393 their status to change (which would only happen if we resumed
2394 them). Even if WNOHANG is set, this return code is preferred
2395 over 0 (below), as it is more detailed. */
2396 if ((find_inferior (&all_threads,
2397 not_stopped_callback,
2398 &wait_ptid) == NULL))
2399 {
2400 if (debug_threads)
2401 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2402 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2403 return -1;
2404 }
2405
2406 /* No interesting event to report to the caller. */
2407 if ((options & WNOHANG))
2408 {
2409 if (debug_threads)
2410 debug_printf ("WNOHANG set, no event found\n");
2411
2412 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2413 return 0;
2414 }
2415
2416 /* Block until we get an event reported with SIGCHLD. */
2417 if (debug_threads)
2418 debug_printf ("sigsuspend'ing\n");
2419
2420 sigsuspend (&prev_mask);
2421 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2422 goto retry;
2423 }
2424
2425 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2426
2427 current_thread = event_thread;
2428
2429 /* Check for thread exit. */
2430 if (! WIFSTOPPED (*wstatp))
2431 {
2432 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2433
2434 if (debug_threads)
2435 debug_printf ("LWP %ld is the last lwp of process. "
2436 "Process %d exiting.\n",
2437 lwpid_of (event_thread), pid_of (event_thread));
2438 return lwpid_of (event_thread);
2439 }
2440
2441 return lwpid_of (event_thread);
2442 }
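
/* Illustrative sketch (not compiled): the function above follows the
   standard race-free SIGCHLD wait pattern -- block the signal, drain
   waitpid with WNOHANG, then atomically unblock and sleep in
   sigsuspend, so a SIGCHLD arriving between the drain and the sleep
   cannot be lost.  Stripped to its skeleton, with record_event and
   have_event_to_report as hypothetical stand-ins for the
   pending-event bookkeeping above:  */
#if 0
sigset_t block_mask, prev_mask;
int wstat;
pid_t ret;

sigfillset (&block_mask);
sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

for (;;)
  {
    /* Pull every pending event out of the kernel.  */
    while ((ret = waitpid (-1, &wstat, WNOHANG | __WALL)) > 0)
      record_event (ret, wstat);

    if (have_event_to_report ())
      break;

    /* Atomically restore the old mask and wait for a signal.  */
    sigsuspend (&prev_mask);
  }

sigprocmask (SIG_SETMASK, &prev_mask, NULL);
#endif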
2443
2444 /* Wait for an event from child(ren) PTID. PTIDs can be:
2445 minus_one_ptid, to specify any child; a pid PTID, specifying all
2446 lwps of a thread group; or a PTID representing a single lwp. Store
2447 the stop status through the status pointer WSTATP. OPTIONS is
2448 passed to the waitpid call. Return 0 if no event was found and
2449 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2450 were found. Return the PID of the stopped child otherwise. */
2451
2452 static int
2453 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2454 {
2455 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2456 }
2457
2458 /* Count the LWPs that have had events. */
2459
2460 static int
2461 count_events_callback (struct inferior_list_entry *entry, void *data)
2462 {
2463 struct thread_info *thread = (struct thread_info *) entry;
2464 struct lwp_info *lp = get_thread_lwp (thread);
2465 int *count = data;
2466
2467 gdb_assert (count != NULL);
2468
2469 /* Count only resumed LWPs that have an event pending. */
2470 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2471 && lp->status_pending_p)
2472 (*count)++;
2473
2474 return 0;
2475 }
2476
2477 /* Select the LWP (if any) that is currently being single-stepped. */
2478
2479 static int
2480 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2481 {
2482 struct thread_info *thread = (struct thread_info *) entry;
2483 struct lwp_info *lp = get_thread_lwp (thread);
2484
2485 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2486 && thread->last_resume_kind == resume_step
2487 && lp->status_pending_p)
2488 return 1;
2489 else
2490 return 0;
2491 }
2492
2493 /* Select the Nth LWP that has had an event. */
2494
2495 static int
2496 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2497 {
2498 struct thread_info *thread = (struct thread_info *) entry;
2499 struct lwp_info *lp = get_thread_lwp (thread);
2500 int *selector = data;
2501
2502 gdb_assert (selector != NULL);
2503
2504 /* Select only resumed LWPs that have an event pending. */
2505 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2506 && lp->status_pending_p)
2507 if ((*selector)-- == 0)
2508 return 1;
2509
2510 return 0;
2511 }
2512
2513 /* Select one LWP out of those that have events pending. */
2514
2515 static void
2516 select_event_lwp (struct lwp_info **orig_lp)
2517 {
2518 int num_events = 0;
2519 int random_selector;
2520 struct thread_info *event_thread = NULL;
2521
2522 /* In all-stop, give preference to the LWP that is being
2523 single-stepped. There will be at most one, and it's the LWP that
2524 the core is most interested in. If we didn't do this, then we'd
2525 have to handle pending step SIGTRAPs somehow in case the core
2526 later continues the previously-stepped thread, otherwise we'd
2527 report the pending SIGTRAP, and the core, not having stepped the
2528 thread, wouldn't understand what the trap was for, and therefore
2529 would report it to the user as a random signal. */
2530 if (!non_stop)
2531 {
2532 event_thread
2533 = (struct thread_info *) find_inferior (&all_threads,
2534 select_singlestep_lwp_callback,
2535 NULL);
2536 if (event_thread != NULL)
2537 {
2538 if (debug_threads)
2539 debug_printf ("SEL: Select single-step %s\n",
2540 target_pid_to_str (ptid_of (event_thread)));
2541 }
2542 }
2543 if (event_thread == NULL)
2544 {
2545 /* No single-stepping LWP. Select one at random, out of those
2546 which have had events. */
2547
2548 /* First see how many events we have. */
2549 find_inferior (&all_threads, count_events_callback, &num_events);
2550 gdb_assert (num_events > 0);
2551
2552 /* Now randomly pick a LWP out of those that have had
2553 events. */
2554 random_selector = (int)
2555 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2556
2557 if (debug_threads && num_events > 1)
2558 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2559 num_events, random_selector);
2560
2561 event_thread
2562 = (struct thread_info *) find_inferior (&all_threads,
2563 select_event_lwp_callback,
2564 &random_selector);
2565 }
2566
2567 if (event_thread != NULL)
2568 {
2569 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2570
2571 /* Switch the event LWP. */
2572 *orig_lp = event_lp;
2573 }
2574 }
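
/* Note (illustrative, not compiled): the selector expression above
   maps rand () uniformly onto [0, num_events).  For example, with
   num_events == 3:  */
#if 0
int random_selector
  = (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0));
/* random_selector is 0, 1 or 2, each with probability ~1/3;
   select_event_lwp_callback then decrements it once per
   pending-event LWP and picks the one that reaches zero.  */
#endif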
2575
2576 /* Decrement the suspend count of an LWP. */
2577
2578 static int
2579 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2580 {
2581 struct thread_info *thread = (struct thread_info *) entry;
2582 struct lwp_info *lwp = get_thread_lwp (thread);
2583
2584 /* Ignore EXCEPT. */
2585 if (lwp == except)
2586 return 0;
2587
2588 lwp->suspended--;
2589
2590 gdb_assert (lwp->suspended >= 0);
2591 return 0;
2592 }
2593
2594 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2595 non-NULL. */
2596
2597 static void
2598 unsuspend_all_lwps (struct lwp_info *except)
2599 {
2600 find_inferior (&all_threads, unsuspend_one_lwp, except);
2601 }
2602
2603 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2604 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2605 void *data);
2606 static int lwp_running (struct inferior_list_entry *entry, void *data);
2607 static ptid_t linux_wait_1 (ptid_t ptid,
2608 struct target_waitstatus *ourstatus,
2609 int target_options);
2610
2611 /* Stabilize threads (move out of jump pads).
2612
2613 If a thread is midway through collecting a fast tracepoint, we need to
2614 finish the collection and move it out of the jump pad before
2615 reporting the signal.
2616
2617 This avoids recursion while collecting (when a signal arrives
2618 midway, and the signal handler itself collects), which would trash
2619 the trace buffer. In case the user set a breakpoint in a signal
2620 handler, this avoids the backtrace showing the jump pad, etc.
2621 Most importantly, there are certain things we can't do safely if
2622 threads are stopped in a jump pad (or in its callee's). For
2623 example:
2624
2625 - starting a new trace run. A thread still collecting the
2626 previous run, could trash the trace buffer when resumed. The trace
2627 buffer control structures would have been reset but the thread had
2628 no way to tell. The thread could even be midway through memcpy'ing
2629 to the buffer, which would mean that when resumed, it would clobber
2630 the trace buffer that had been set for a new run.
2631
2632 - we can't rewrite/reuse the jump pads for new tracepoints
2633 safely. Say you do tstart while a thread is stopped midway through
2634 a collection. When the thread is later resumed, it finishes the
2635 collection, and returns to the jump pad, to execute the original
2636 instruction that was under the tracepoint jump at the time the
2637 older run had been started. If the jump pad had been rewritten
2638 since for something else in the new run, the thread would now
2639 execute the wrong / random instructions. */
2640
2641 static void
2642 linux_stabilize_threads (void)
2643 {
2644 struct thread_info *saved_thread;
2645 struct thread_info *thread_stuck;
2646
2647 thread_stuck
2648 = (struct thread_info *) find_inferior (&all_threads,
2649 stuck_in_jump_pad_callback,
2650 NULL);
2651 if (thread_stuck != NULL)
2652 {
2653 if (debug_threads)
2654 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2655 lwpid_of (thread_stuck));
2656 return;
2657 }
2658
2659 saved_thread = current_thread;
2660
2661 stabilizing_threads = 1;
2662
2663 /* Kick 'em all. */
2664 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2665
2666 /* Loop until all are stopped out of the jump pads. */
2667 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2668 {
2669 struct target_waitstatus ourstatus;
2670 struct lwp_info *lwp;
2671 int wstat;
2672
2673 /* Note that we go through the full wait event loop. While
2674 moving threads out of jump pad, we need to be able to step
2675 over internal breakpoints and such. */
2676 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2677
2678 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2679 {
2680 lwp = get_thread_lwp (current_thread);
2681
2682 /* Lock it. */
2683 lwp->suspended++;
2684
2685 if (ourstatus.value.sig != GDB_SIGNAL_0
2686 || current_thread->last_resume_kind == resume_stop)
2687 {
2688 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2689 enqueue_one_deferred_signal (lwp, &wstat);
2690 }
2691 }
2692 }
2693
2694 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2695
2696 stabilizing_threads = 0;
2697
2698 current_thread = saved_thread;
2699
2700 if (debug_threads)
2701 {
2702 thread_stuck
2703 = (struct thread_info *) find_inferior (&all_threads,
2704 stuck_in_jump_pad_callback,
2705 NULL);
2706 if (thread_stuck != NULL)
2707 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2708 lwpid_of (thread_stuck));
2709 }
2710 }
2711
2712 static void async_file_mark (void);
2713
2714 /* Convenience function that is called when the kernel reports an
2715 event that is not passed out to GDB. */
2716
2717 static ptid_t
2718 ignore_event (struct target_waitstatus *ourstatus)
2719 {
2720 /* If we got an event, there may still be others, as a single
2721 SIGCHLD can indicate more than one child stopped. This forces
2722 another target_wait call. */
2723 async_file_mark ();
2724
2725 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2726 return null_ptid;
2727 }
2728
2729 /* Return non-zero if WAITSTATUS reflects an extended linux
2730 event. Otherwise, return zero. */
2731
2732 static int
2733 extended_event_reported (const struct target_waitstatus *waitstatus)
2734 {
2735 if (waitstatus == NULL)
2736 return 0;
2737
2738 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2739 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2740 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2741 }
2742
2743 /* Wait for a process event; store the status in OURSTATUS. */
2744
2745 static ptid_t
2746 linux_wait_1 (ptid_t ptid,
2747 struct target_waitstatus *ourstatus, int target_options)
2748 {
2749 int w;
2750 struct lwp_info *event_child;
2751 int options;
2752 int pid;
2753 int step_over_finished;
2754 int bp_explains_trap;
2755 int maybe_internal_trap;
2756 int report_to_gdb;
2757 int trace_event;
2758 int in_step_range;
2759
2760 if (debug_threads)
2761 {
2762 debug_enter ();
2763 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2764 }
2765
2766 /* Translate generic target options into linux options. */
2767 options = __WALL;
2768 if (target_options & TARGET_WNOHANG)
2769 options |= WNOHANG;
2770
2771 bp_explains_trap = 0;
2772 trace_event = 0;
2773 in_step_range = 0;
2774 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2775
2776 if (ptid_equal (step_over_bkpt, null_ptid))
2777 pid = linux_wait_for_event (ptid, &w, options);
2778 else
2779 {
2780 if (debug_threads)
2781 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2782 target_pid_to_str (step_over_bkpt));
2783 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2784 }
2785
2786 if (pid == 0)
2787 {
2788 gdb_assert (target_options & TARGET_WNOHANG);
2789
2790 if (debug_threads)
2791 {
2792 debug_printf ("linux_wait_1 ret = null_ptid, "
2793 "TARGET_WAITKIND_IGNORE\n");
2794 debug_exit ();
2795 }
2796
2797 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2798 return null_ptid;
2799 }
2800 else if (pid == -1)
2801 {
2802 if (debug_threads)
2803 {
2804 debug_printf ("linux_wait_1 ret = null_ptid, "
2805 "TARGET_WAITKIND_NO_RESUMED\n");
2806 debug_exit ();
2807 }
2808
2809 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2810 return null_ptid;
2811 }
2812
2813 event_child = get_thread_lwp (current_thread);
2814
2815 /* linux_wait_for_event only returns an exit status for the last
2816 child of a process. Report it. */
2817 if (WIFEXITED (w) || WIFSIGNALED (w))
2818 {
2819 if (WIFEXITED (w))
2820 {
2821 ourstatus->kind = TARGET_WAITKIND_EXITED;
2822 ourstatus->value.integer = WEXITSTATUS (w);
2823
2824 if (debug_threads)
2825 {
2826 debug_printf ("linux_wait_1 ret = %s, exited with "
2827 "retcode %d\n",
2828 target_pid_to_str (ptid_of (current_thread)),
2829 WEXITSTATUS (w));
2830 debug_exit ();
2831 }
2832 }
2833 else
2834 {
2835 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2836 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2837
2838 if (debug_threads)
2839 {
2840 debug_printf ("linux_wait_1 ret = %s, terminated with "
2841 "signal %d\n",
2842 target_pid_to_str (ptid_of (current_thread)),
2843 WTERMSIG (w));
2844 debug_exit ();
2845 }
2846 }
2847
2848 return ptid_of (current_thread);
2849 }
2850
2851 /* If step-over executes a breakpoint instruction, it means a
2852 gdb/gdbserver breakpoint had been planted on top of a permanent
2853 breakpoint. The PC has been adjusted by
2854 check_stopped_by_breakpoint to point at the breakpoint address.
2855 Advance the PC manually past the breakpoint, otherwise the
2856 program would keep trapping the permanent breakpoint forever. */
2857 if (!ptid_equal (step_over_bkpt, null_ptid)
2858 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2859 {
2860 unsigned int increment_pc = the_low_target.breakpoint_len;
2861
2862 if (debug_threads)
2863 {
2864 debug_printf ("step-over for %s executed software breakpoint\n",
2865 target_pid_to_str (ptid_of (current_thread)));
2866 }
2867
2868 if (increment_pc != 0)
2869 {
2870 struct regcache *regcache
2871 = get_thread_regcache (current_thread, 1);
2872
2873 event_child->stop_pc += increment_pc;
2874 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2875
2876 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2877 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2878 }
2879 }
2880
2881 /* If this event was not handled before, and is not a SIGTRAP, we
2882 report it. SIGILL and SIGSEGV are also treated as traps in case
2883 a breakpoint is inserted at the current PC. If this target does
2884 not support internal breakpoints at all, we also report the
2885 SIGTRAP without further processing; it's of no concern to us. */
2886 maybe_internal_trap
2887 = (supports_breakpoints ()
2888 && (WSTOPSIG (w) == SIGTRAP
2889 || ((WSTOPSIG (w) == SIGILL
2890 || WSTOPSIG (w) == SIGSEGV)
2891 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2892
2893 if (maybe_internal_trap)
2894 {
2895 /* Handle anything that requires bookkeeping before deciding to
2896 report the event or continue waiting. */
2897
2898 /* First check if we can explain the SIGTRAP with an internal
2899 breakpoint, or if we should possibly report the event to GDB.
2900 Do this before anything that may remove or insert a
2901 breakpoint. */
2902 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2903
2904 /* We have a SIGTRAP, possibly a step-over dance has just
2905 finished. If so, tweak the state machine accordingly,
2906 reinsert breakpoints and delete any reinsert (software
2907 single-step) breakpoints. */
2908 step_over_finished = finish_step_over (event_child);
2909
2910 /* Now invoke the callbacks of any internal breakpoints there. */
2911 check_breakpoints (event_child->stop_pc);
2912
2913 /* Handle tracepoint data collecting. This may overflow the
2914 trace buffer, and cause a tracing stop, removing
2915 breakpoints. */
2916 trace_event = handle_tracepoints (event_child);
2917
2918 if (bp_explains_trap)
2919 {
2920 /* If we stepped or ran into an internal breakpoint, we've
2921 already handled it. So next time we resume (from this
2922 PC), we should step over it. */
2923 if (debug_threads)
2924 debug_printf ("Hit a gdbserver breakpoint.\n");
2925
2926 if (breakpoint_here (event_child->stop_pc))
2927 event_child->need_step_over = 1;
2928 }
2929 }
2930 else
2931 {
2932 /* We have some other signal, possibly a step-over dance was in
2933 progress, and it should be cancelled too. */
2934 step_over_finished = finish_step_over (event_child);
2935 }
2936
2937 /* We have all the data we need. Either report the event to GDB, or
2938 resume threads and keep waiting for more. */
2939
2940 /* If we're collecting a fast tracepoint, finish the collection and
2941 move out of the jump pad before delivering a signal. See
2942 linux_stabilize_threads. */
2943
2944 if (WIFSTOPPED (w)
2945 && WSTOPSIG (w) != SIGTRAP
2946 && supports_fast_tracepoints ()
2947 && agent_loaded_p ())
2948 {
2949 if (debug_threads)
2950 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2951 "to defer or adjust it.\n",
2952 WSTOPSIG (w), lwpid_of (current_thread));
2953
2954 /* Allow debugging the jump pad itself. */
2955 if (current_thread->last_resume_kind != resume_step
2956 && maybe_move_out_of_jump_pad (event_child, &w))
2957 {
2958 enqueue_one_deferred_signal (event_child, &w);
2959
2960 if (debug_threads)
2961 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2962 WSTOPSIG (w), lwpid_of (current_thread));
2963
2964 linux_resume_one_lwp (event_child, 0, 0, NULL);
2965
2966 return ignore_event (ourstatus);
2967 }
2968 }
2969
2970 if (event_child->collecting_fast_tracepoint)
2971 {
2972 if (debug_threads)
2973 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2974 "Check if we're already there.\n",
2975 lwpid_of (current_thread),
2976 event_child->collecting_fast_tracepoint);
2977
2978 trace_event = 1;
2979
2980 event_child->collecting_fast_tracepoint
2981 = linux_fast_tracepoint_collecting (event_child, NULL);
2982
2983 if (event_child->collecting_fast_tracepoint != 1)
2984 {
2985 /* No longer need this breakpoint. */
2986 if (event_child->exit_jump_pad_bkpt != NULL)
2987 {
2988 if (debug_threads)
2989 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
2990 "stopping all threads momentarily.\n");
2991
2992 /* Other running threads could hit this breakpoint.
2993 We don't handle moribund locations like GDB does,
2994 instead we always pause all threads when removing
2995 breakpoints, so that any step-over or
2996 decr_pc_after_break adjustment is always taken
2997 care of while the breakpoint is still
2998 inserted. */
2999 stop_all_lwps (1, event_child);
3000
3001 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3002 event_child->exit_jump_pad_bkpt = NULL;
3003
3004 unstop_all_lwps (1, event_child);
3005
3006 gdb_assert (event_child->suspended >= 0);
3007 }
3008 }
3009
3010 if (event_child->collecting_fast_tracepoint == 0)
3011 {
3012 if (debug_threads)
3013 debug_printf ("fast tracepoint finished "
3014 "collecting successfully.\n");
3015
3016 /* We may have a deferred signal to report. */
3017 if (dequeue_one_deferred_signal (event_child, &w))
3018 {
3019 if (debug_threads)
3020 debug_printf ("dequeued one signal.\n");
3021 }
3022 else
3023 {
3024 if (debug_threads)
3025 debug_printf ("no deferred signals.\n");
3026
3027 if (stabilizing_threads)
3028 {
3029 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3030 ourstatus->value.sig = GDB_SIGNAL_0;
3031
3032 if (debug_threads)
3033 {
3034 debug_printf ("linux_wait_1 ret = %s, stopped "
3035 "while stabilizing threads\n",
3036 target_pid_to_str (ptid_of (current_thread)));
3037 debug_exit ();
3038 }
3039
3040 return ptid_of (current_thread);
3041 }
3042 }
3043 }
3044 }
3045
3046 /* Check whether GDB would be interested in this event. */
3047
3048 /* If GDB is not interested in this signal, don't stop other
3049 threads, and don't report it to GDB. Just resume the inferior
3050 right away. We do this for threading-related signals as well as
3051 any that GDB specifically requested we ignore. But never ignore
3052 SIGSTOP if we sent it ourselves, and do not ignore signals when
3053 stepping - they may require special handling to skip the signal
3054 handler. Also never ignore signals that could be caused by a
3055 breakpoint. */
3056 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3057 thread library? */
3058 if (WIFSTOPPED (w)
3059 && current_thread->last_resume_kind != resume_step
3060 && (
3061 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3062 (current_process ()->priv->thread_db != NULL
3063 && (WSTOPSIG (w) == __SIGRTMIN
3064 || WSTOPSIG (w) == __SIGRTMIN + 1))
3065 ||
3066 #endif
3067 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3068 && !(WSTOPSIG (w) == SIGSTOP
3069 && current_thread->last_resume_kind == resume_stop)
3070 && !linux_wstatus_maybe_breakpoint (w))))
3071 {
3072 siginfo_t info, *info_p;
3073
3074 if (debug_threads)
3075 debug_printf ("Ignored signal %d for LWP %ld.\n",
3076 WSTOPSIG (w), lwpid_of (current_thread));
3077
3078 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3079 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3080 info_p = &info;
3081 else
3082 info_p = NULL;
3083 linux_resume_one_lwp (event_child, event_child->stepping,
3084 WSTOPSIG (w), info_p);
3085 return ignore_event (ourstatus);
3086 }
3087
3088 /* Note that all addresses are always "out of the step range" when
3089 there's no range to begin with. */
3090 in_step_range = lwp_in_step_range (event_child);
3091
3092 /* If GDB wanted this thread to single step, and the thread is out
3093 of the step range, we always want to report the SIGTRAP, and let
3094 GDB handle it. Watchpoints should always be reported. So should
3095 signals we can't explain. A SIGTRAP we can't explain could be a
3096 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3097 we do, we'll be able to handle GDB breakpoints on top of internal
3098 breakpoints, by handling the internal breakpoint and still
3099 reporting the event to GDB. If we don't, we're out of luck; GDB
3100 won't see the breakpoint hit. */
3101 report_to_gdb = (!maybe_internal_trap
3102 || (current_thread->last_resume_kind == resume_step
3103 && !in_step_range)
3104 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3105 || (!step_over_finished && !in_step_range
3106 && !bp_explains_trap && !trace_event)
3107 || (gdb_breakpoint_here (event_child->stop_pc)
3108 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3109 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3110 || extended_event_reported (&event_child->waitstatus));
3111
3112 run_breakpoint_commands (event_child->stop_pc);
3113
3114 /* We found no reason GDB would want us to stop. We either hit one
3115 of our own breakpoints, or finished an internal step GDB
3116 shouldn't know about. */
3117 if (!report_to_gdb)
3118 {
3119 if (debug_threads)
3120 {
3121 if (bp_explains_trap)
3122 debug_printf ("Hit a gdbserver breakpoint.\n");
3123 if (step_over_finished)
3124 debug_printf ("Step-over finished.\n");
3125 if (trace_event)
3126 debug_printf ("Tracepoint event.\n");
3127 if (lwp_in_step_range (event_child))
3128 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3129 paddress (event_child->stop_pc),
3130 paddress (event_child->step_range_start),
3131 paddress (event_child->step_range_end));
3132 if (extended_event_reported (&event_child->waitstatus))
3133 {
3134 char *str = target_waitstatus_to_string (&event_child->waitstatus);
3135 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3136 lwpid_of (get_lwp_thread (event_child)), str);
3137 xfree (str);
3138 }
3139 }
3140
3141 /* We're not reporting this breakpoint to GDB, so apply the
3142 decr_pc_after_break adjustment to the inferior's regcache
3143 ourselves. */
3144
3145 if (the_low_target.set_pc != NULL)
3146 {
3147 struct regcache *regcache
3148 = get_thread_regcache (current_thread, 1);
3149 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3150 }
3151
3152 /* We may have finished stepping over a breakpoint. If so,
3153 we've stopped and suspended all LWPs momentarily except the
3154 stepping one. This is where we resume them all again. We're
3155 going to keep waiting, so use proceed, which handles stepping
3156 over the next breakpoint. */
3157 if (debug_threads)
3158 debug_printf ("proceeding all threads.\n");
3159
3160 if (step_over_finished)
3161 unsuspend_all_lwps (event_child);
3162
3163 proceed_all_lwps ();
3164 return ignore_event (ourstatus);
3165 }
3166
3167 if (debug_threads)
3168 {
3169 if (current_thread->last_resume_kind == resume_step)
3170 {
3171 if (event_child->step_range_start == event_child->step_range_end)
3172 debug_printf ("GDB wanted to single-step, reporting event.\n");
3173 else if (!lwp_in_step_range (event_child))
3174 debug_printf ("Out of step range, reporting event.\n");
3175 }
3176 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3177 debug_printf ("Stopped by watchpoint.\n");
3178 else if (gdb_breakpoint_here (event_child->stop_pc))
3179 debug_printf ("Stopped by GDB breakpoint.\n");
3181 debug_printf ("Hit a non-gdbserver trap event.\n");
3182 }
3183
3184 /* Alright, we're going to report a stop. */
3185
3186 if (!stabilizing_threads)
3187 {
3188 /* In all-stop, stop all threads. */
3189 if (!non_stop)
3190 stop_all_lwps (0, NULL);
3191
3192 /* If we're not waiting for a specific LWP, choose an event LWP
3193 from among those that have had events. Giving equal priority
3194 to all LWPs that have had events helps prevent
3195 starvation. */
3196 if (ptid_equal (ptid, minus_one_ptid))
3197 {
3198 event_child->status_pending_p = 1;
3199 event_child->status_pending = w;
3200
3201 select_event_lwp (&event_child);
3202
3203 /* current_thread and event_child must stay in sync. */
3204 current_thread = get_lwp_thread (event_child);
3205
3206 event_child->status_pending_p = 0;
3207 w = event_child->status_pending;
3208 }
3209
3210 if (step_over_finished)
3211 {
3212 if (!non_stop)
3213 {
3214 /* If we were doing a step-over, all other threads but
3215 the stepping one had been paused in start_step_over,
3216 with their suspend counts incremented. We don't want
3217 to do a full unstop/unpause, because we're in
3218 all-stop mode (so we want threads stopped), but we
3219 still need to unsuspend the other threads, to
3220 decrement their `suspended' count back. */
3221 unsuspend_all_lwps (event_child);
3222 }
3223 else
3224 {
3225 /* If we just finished a step-over, then all threads had
3226 been momentarily paused. In all-stop, that's fine,
3227 we want threads stopped by now anyway. In non-stop,
3228 we need to re-resume threads that GDB wanted to be
3229 running. */
3230 unstop_all_lwps (1, event_child);
3231 }
3232 }
3233
3234 /* Stabilize threads (move out of jump pads). */
3235 if (!non_stop)
3236 stabilize_threads ();
3237 }
3238 else
3239 {
3240 /* If we just finished a step-over, then all threads had been
3241 momentarily paused. In all-stop, that's fine, we want
3242 threads stopped by now anyway. In non-stop, we need to
3243 re-resume threads that GDB wanted to be running. */
3244 if (step_over_finished)
3245 unstop_all_lwps (1, event_child);
3246 }
3247
3248 if (extended_event_reported (&event_child->waitstatus))
3249 {
3250 /* If the reported event is a fork, vfork or exec, let GDB know. */
3251 ourstatus->kind = event_child->waitstatus.kind;
3252 ourstatus->value = event_child->waitstatus.value;
3253
3254 /* Clear the event lwp's waitstatus since we handled it already. */
3255 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3256 }
3257 else
3258 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3259
3260 /* Now that we've selected our final event LWP, un-adjust its PC if
3261 it was a software breakpoint, and the client doesn't know we can
3262 adjust the breakpoint ourselves. */
3263 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3264 && !swbreak_feature)
3265 {
3266 int decr_pc = the_low_target.decr_pc_after_break;
3267
3268 if (decr_pc != 0)
3269 {
3270 struct regcache *regcache
3271 = get_thread_regcache (current_thread, 1);
3272 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3273 }
3274 }
3275
3276 if (current_thread->last_resume_kind == resume_stop
3277 && WSTOPSIG (w) == SIGSTOP)
3278 {
3279 /* A thread that has been requested to stop by GDB with vCont;t,
3280 and it stopped cleanly, so report as SIG0. The use of
3281 SIGSTOP is an implementation detail. */
3282 ourstatus->value.sig = GDB_SIGNAL_0;
3283 }
3284 else if (current_thread->last_resume_kind == resume_stop
3285 && WSTOPSIG (w) != SIGSTOP)
3286 {
3287 /* A thread that has been requested to stop by GDB with vCont;t,
3288 but it stopped for other reasons. */
3289 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3290 }
3291 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3292 {
3293 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3294 }
3295
3296 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3297
3298 if (debug_threads)
3299 {
3300 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3301 target_pid_to_str (ptid_of (current_thread)),
3302 ourstatus->kind, ourstatus->value.sig);
3303 debug_exit ();
3304 }
3305
3306 return ptid_of (current_thread);
3307 }
3308
3309 /* Get rid of any pending event in the pipe. */
3310 static void
3311 async_file_flush (void)
3312 {
3313 int ret;
3314 char buf;
3315
3316 do
3317 ret = read (linux_event_pipe[0], &buf, 1);
3318 while (ret >= 0 || (ret == -1 && errno == EINTR));
3319 }
3320
3321 /* Put something in the pipe, so the event loop wakes up. */
3322 static void
3323 async_file_mark (void)
3324 {
3325 int ret;
3326
3327 async_file_flush ();
3328
3329 do
3330 ret = write (linux_event_pipe[1], "+", 1);
3331 while (ret == 0 || (ret == -1 && errno == EINTR));
3332
3333 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3334 be awakened anyway. */
3335 }
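
/* Illustrative sketch (not compiled): async_file_flush and
   async_file_mark are the two halves of the classic self-pipe trick.
   The pipe itself is assumed to be created elsewhere with both ends
   nonblocking, roughly like this:  */
#if 0
static int
setup_event_pipe_sketch (void)
{
  if (pipe (linux_event_pipe) != 0)
    return -1;

  /* Nonblocking on both ends: async_file_flush reads until EAGAIN,
     and async_file_mark must not block when the pipe is full.  */
  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

  /* The event loop select()s on linux_event_pipe[0]; writing one
     byte to linux_event_pipe[1] wakes it up.  */
  return 0;
}
#endif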
3336
3337 static ptid_t
3338 linux_wait (ptid_t ptid,
3339 struct target_waitstatus *ourstatus, int target_options)
3340 {
3341 ptid_t event_ptid;
3342
3343 /* Flush the async file first. */
3344 if (target_is_async_p ())
3345 async_file_flush ();
3346
3347 do
3348 {
3349 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3350 }
3351 while ((target_options & TARGET_WNOHANG) == 0
3352 && ptid_equal (event_ptid, null_ptid)
3353 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3354
3355 /* If at least one stop was reported, there may be more. A single
3356 SIGCHLD can signal more than one child stop. */
3357 if (target_is_async_p ()
3358 && (target_options & TARGET_WNOHANG) != 0
3359 && !ptid_equal (event_ptid, null_ptid))
3360 async_file_mark ();
3361
3362 return event_ptid;
3363 }
3364
3365 /* Send a signal to an LWP. */
3366
3367 static int
3368 kill_lwp (unsigned long lwpid, int signo)
3369 {
3370 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3371 fails, then we are not using nptl threads and we should be using kill. */
3372
3373 #ifdef __NR_tkill
3374 {
3375 static int tkill_failed;
3376
3377 if (!tkill_failed)
3378 {
3379 int ret;
3380
3381 errno = 0;
3382 ret = syscall (__NR_tkill, lwpid, signo);
3383 if (errno != ENOSYS)
3384 return ret;
3385 tkill_failed = 1;
3386 }
3387 }
3388 #endif
3389
3390 return kill (lwpid, signo);
3391 }
3392
3393 void
3394 linux_stop_lwp (struct lwp_info *lwp)
3395 {
3396 send_sigstop (lwp);
3397 }
3398
3399 static void
3400 send_sigstop (struct lwp_info *lwp)
3401 {
3402 int pid;
3403
3404 pid = lwpid_of (get_lwp_thread (lwp));
3405
3406 /* If we already have a pending stop signal for this process, don't
3407 send another. */
3408 if (lwp->stop_expected)
3409 {
3410 if (debug_threads)
3411 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3412
3413 return;
3414 }
3415
3416 if (debug_threads)
3417 debug_printf ("Sending sigstop to lwp %d\n", pid);
3418
3419 lwp->stop_expected = 1;
3420 kill_lwp (pid, SIGSTOP);
3421 }
3422
3423 static int
3424 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3425 {
3426 struct thread_info *thread = (struct thread_info *) entry;
3427 struct lwp_info *lwp = get_thread_lwp (thread);
3428
3429 /* Ignore EXCEPT. */
3430 if (lwp == except)
3431 return 0;
3432
3433 if (lwp->stopped)
3434 return 0;
3435
3436 send_sigstop (lwp);
3437 return 0;
3438 }
3439
3440 /* Increment the suspend count of an LWP, and stop it, if not stopped
3441 yet. */
3442 static int
3443 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3444 void *except)
3445 {
3446 struct thread_info *thread = (struct thread_info *) entry;
3447 struct lwp_info *lwp = get_thread_lwp (thread);
3448
3449 /* Ignore EXCEPT. */
3450 if (lwp == except)
3451 return 0;
3452
3453 lwp->suspended++;
3454
3455 return send_sigstop_callback (entry, except);
3456 }
3457
3458 static void
3459 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3460 {
3461 /* It's dead, really. */
3462 lwp->dead = 1;
3463
3464 /* Store the exit status for later. */
3465 lwp->status_pending_p = 1;
3466 lwp->status_pending = wstat;
3467
3468 /* Prevent trying to stop it. */
3469 lwp->stopped = 1;
3470
3471 /* No further stops are expected from a dead lwp. */
3472 lwp->stop_expected = 0;
3473 }
3474
3475 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3476
3477 static void
3478 wait_for_sigstop (void)
3479 {
3480 struct thread_info *saved_thread;
3481 ptid_t saved_tid;
3482 int wstat;
3483 int ret;
3484
3485 saved_thread = current_thread;
3486 if (saved_thread != NULL)
3487 saved_tid = saved_thread->entry.id;
3488 else
3489 saved_tid = null_ptid; /* avoid bogus unused warning */
3490
3491 if (debug_threads)
3492 debug_printf ("wait_for_sigstop: pulling events\n");
3493
3494 /* Passing NULL_PTID as filter indicates we want all events to be
3495 left pending. Eventually this returns when there are no
3496 unwaited-for children left. */
3497 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3498 &wstat, __WALL);
3499 gdb_assert (ret == -1);
3500
3501 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3502 current_thread = saved_thread;
3503 else
3504 {
3505 if (debug_threads)
3506 debug_printf ("Previously current thread died.\n");
3507
3508 if (non_stop)
3509 {
3510 /* We can't change the current inferior behind GDB's back,
3511 otherwise, a subsequent command may apply to the wrong
3512 process. */
3513 current_thread = NULL;
3514 }
3515 else
3516 {
3517 /* Set a valid thread as current. */
3518 set_desired_thread (0);
3519 }
3520 }
3521 }
3522
3523 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3524 move it out, because we need to report the stop event to GDB. For
3525 example, if the user puts a breakpoint in the jump pad, it's
3526 because she wants to debug it. */
3527
3528 static int
3529 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3530 {
3531 struct thread_info *thread = (struct thread_info *) entry;
3532 struct lwp_info *lwp = get_thread_lwp (thread);
3533
3534 gdb_assert (lwp->suspended == 0);
3535 gdb_assert (lwp->stopped);
3536
3537 /* Allow debugging the jump pad, gdb_collect, etc. */
3538 return (supports_fast_tracepoints ()
3539 && agent_loaded_p ()
3540 && (gdb_breakpoint_here (lwp->stop_pc)
3541 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3542 || thread->last_resume_kind == resume_step)
3543 && linux_fast_tracepoint_collecting (lwp, NULL));
3544 }
3545
3546 static void
3547 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3548 {
3549 struct thread_info *thread = (struct thread_info *) entry;
3550 struct lwp_info *lwp = get_thread_lwp (thread);
3551 int *wstat;
3552
3553 gdb_assert (lwp->suspended == 0);
3554 gdb_assert (lwp->stopped);
3555
3556 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3557
3558 /* Allow debugging the jump pad, gdb_collect, etc. */
3559 if (!gdb_breakpoint_here (lwp->stop_pc)
3560 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3561 && thread->last_resume_kind != resume_step
3562 && maybe_move_out_of_jump_pad (lwp, wstat))
3563 {
3564 if (debug_threads)
3565 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3566 lwpid_of (thread));
3567
3568 if (wstat)
3569 {
3570 lwp->status_pending_p = 0;
3571 enqueue_one_deferred_signal (lwp, wstat);
3572
3573 if (debug_threads)
3574 debug_printf ("Signal %d for LWP %ld deferred "
3575 "(in jump pad)\n",
3576 WSTOPSIG (*wstat), lwpid_of (thread));
3577 }
3578
3579 linux_resume_one_lwp (lwp, 0, 0, NULL);
3580 }
3581 else
3582 lwp->suspended++;
3583 }
3584
3585 static int
3586 lwp_running (struct inferior_list_entry *entry, void *data)
3587 {
3588 struct thread_info *thread = (struct thread_info *) entry;
3589 struct lwp_info *lwp = get_thread_lwp (thread);
3590
3591 if (lwp->dead)
3592 return 0;
3593 if (lwp->stopped)
3594 return 0;
3595 return 1;
3596 }
3597
3598 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3599 If SUSPEND, then also increase the suspend count of every LWP,
3600 except EXCEPT. */
3601
3602 static void
3603 stop_all_lwps (int suspend, struct lwp_info *except)
3604 {
3605 /* Should not be called recursively. */
3606 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3607
3608 if (debug_threads)
3609 {
3610 debug_enter ();
3611 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3612 suspend ? "stop-and-suspend" : "stop",
3613 except != NULL
3614 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3615 : "none");
3616 }
3617
3618 stopping_threads = (suspend
3619 ? STOPPING_AND_SUSPENDING_THREADS
3620 : STOPPING_THREADS);
3621
3622 if (suspend)
3623 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3624 else
3625 find_inferior (&all_threads, send_sigstop_callback, except);
3626 wait_for_sigstop ();
3627 stopping_threads = NOT_STOPPING_THREADS;
3628
3629 if (debug_threads)
3630 {
3631 debug_printf ("stop_all_lwps done, setting stopping_threads "
3632 "back to !stopping\n");
3633 debug_exit ();
3634 }
3635 }
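
/* Usage note (illustrative, not compiled): the SUSPEND flavor is
   paired with a later unsuspend/unstop so that each LWP's
   `suspended' count stays balanced, as the exit-jump-pad code above
   does:  */
#if 0
stop_all_lwps (1, lwp);		/* Stop and suspend all but LWP.  */
delete_breakpoint (lwp->exit_jump_pad_bkpt);	/* Safe: all stopped.  */
lwp->exit_jump_pad_bkpt = NULL;
unstop_all_lwps (1, lwp);	/* Unsuspend and resume them.  */
#endif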
3636
3637 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3638 SIGNAL is nonzero, give it that signal. */
3639
3640 static void
3641 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3642 int step, int signal, siginfo_t *info)
3643 {
3644 struct thread_info *thread = get_lwp_thread (lwp);
3645 struct thread_info *saved_thread;
3646 int fast_tp_collecting;
3647
3648 if (lwp->stopped == 0)
3649 return;
3650
3651 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3652
3653 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3654
3655 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3656 user used the "jump" command, or "set $pc = foo"). */
3657 if (lwp->stop_pc != get_pc (lwp))
3658 {
3659 /* Collecting 'while-stepping' actions doesn't make sense
3660 anymore. */
3661 release_while_stepping_state_list (thread);
3662 }
3663
3664 /* If we have pending signals or status, and a new signal, enqueue the
3665 signal. Also enqueue the signal if we are waiting to reinsert a
3666 breakpoint; it will be picked up again below. */
3667 if (signal != 0
3668 && (lwp->status_pending_p
3669 || lwp->pending_signals != NULL
3670 || lwp->bp_reinsert != 0
3671 || fast_tp_collecting))
3672 {
3673 struct pending_signals *p_sig;
3674 p_sig = xmalloc (sizeof (*p_sig));
3675 p_sig->prev = lwp->pending_signals;
3676 p_sig->signal = signal;
3677 if (info == NULL)
3678 memset (&p_sig->info, 0, sizeof (siginfo_t));
3679 else
3680 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3681 lwp->pending_signals = p_sig;
3682 }
3683
3684 if (lwp->status_pending_p)
3685 {
3686 if (debug_threads)
3687 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3688 " has pending status\n",
3689 lwpid_of (thread), step ? "step" : "continue", signal,
3690 lwp->stop_expected ? "expected" : "not expected");
3691 return;
3692 }
3693
3694 saved_thread = current_thread;
3695 current_thread = thread;
3696
3697 if (debug_threads)
3698 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3699 lwpid_of (thread), step ? "step" : "continue", signal,
3700 lwp->stop_expected ? "expected" : "not expected");
3701
3702 /* This bit needs some thinking about. If we get a signal that
3703 we must report while a single-step reinsert is still pending,
3704 we often end up resuming the thread. It might be better to
3705 (ew) allow a stack of pending events; then we could be sure that
3706 the reinsert happened right away and not lose any signals.
3707
3708 Making this stack would also shrink the window in which breakpoints are
3709 uninserted (see comment in linux_wait_for_lwp) but not enough for
3710 complete correctness, so it won't solve that problem. It may be
3711 worthwhile just to solve this one, however. */
3712 if (lwp->bp_reinsert != 0)
3713 {
3714 if (debug_threads)
3715 debug_printf (" pending reinsert at 0x%s\n",
3716 paddress (lwp->bp_reinsert));
3717
3718 if (can_hardware_single_step ())
3719 {
3720 if (fast_tp_collecting == 0)
3721 {
3722 if (step == 0)
3723 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3724 if (lwp->suspended)
3725 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3726 lwp->suspended);
3727 }
3728
3729 step = 1;
3730 }
3731
3732 /* Postpone any pending signal. It was enqueued above. */
3733 signal = 0;
3734 }
3735
3736 if (fast_tp_collecting == 1)
3737 {
3738 if (debug_threads)
3739 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3740 " (exit-jump-pad-bkpt)\n",
3741 lwpid_of (thread));
3742
3743 /* Postpone any pending signal. It was enqueued above. */
3744 signal = 0;
3745 }
3746 else if (fast_tp_collecting == 2)
3747 {
3748 if (debug_threads)
3749 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3750 " single-stepping\n",
3751 lwpid_of (thread));
3752
3753 if (can_hardware_single_step ())
3754 step = 1;
3755 else
3756 {
3757 internal_error (__FILE__, __LINE__,
3758 "moving out of jump pad single-stepping"
3759 " not implemented on this target");
3760 }
3761
3762 /* Postpone any pending signal. It was enqueued above. */
3763 signal = 0;
3764 }
3765
3766 /* If we have while-stepping actions in this thread, set it stepping.
3767 If we have a signal to deliver, it may or may not be set to
3768 SIG_IGN, we don't know. Assume so, and allow collecting
3769 while-stepping into a signal handler. A possible smart thing to
3770 do would be to set an internal breakpoint at the signal return
3771 address, continue, and carry on catching this while-stepping
3772 action only when that breakpoint is hit. A future
3773 enhancement. */
3774 if (thread->while_stepping != NULL
3775 && can_hardware_single_step ())
3776 {
3777 if (debug_threads)
3778 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3779 lwpid_of (thread));
3780 step = 1;
3781 }
3782
3783 if (the_low_target.get_pc != NULL)
3784 {
3785 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3786
3787 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3788
3789 if (debug_threads)
3790 {
3791 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3792 (long) lwp->stop_pc);
3793 }
3794 }
3795
3796 /* If we have pending signals, consume one unless we are trying to
3797 reinsert a breakpoint or we're trying to finish a fast tracepoint
3798 collect. */
3799 if (lwp->pending_signals != NULL
3800 && lwp->bp_reinsert == 0
3801 && fast_tp_collecting == 0)
3802 {
3803 struct pending_signals **p_sig;
3804
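/* Signals are pushed at the head of the PENDING_SIGNALS list, so
   the oldest is at the tail; walk there so signals are delivered
   in FIFO order.  */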
3805 p_sig = &lwp->pending_signals;
3806 while ((*p_sig)->prev != NULL)
3807 p_sig = &(*p_sig)->prev;
3808
3809 signal = (*p_sig)->signal;
3810 if ((*p_sig)->info.si_signo != 0)
3811 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3812 &(*p_sig)->info);
3813
3814 free (*p_sig);
3815 *p_sig = NULL;
3816 }
3817
3818 if (the_low_target.prepare_to_resume != NULL)
3819 the_low_target.prepare_to_resume (lwp);
3820
3821 regcache_invalidate_thread (thread);
3822 errno = 0;
3823 lwp->stepping = step;
3824 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3825 (PTRACE_TYPE_ARG3) 0,
3826 /* Coerce to a uintptr_t first to avoid potential gcc warning
3827 of coercing an 8 byte integer to a 4 byte pointer. */
3828 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3829
3830 current_thread = saved_thread;
3831 if (errno)
3832 perror_with_name ("resuming thread");
3833
3834 /* Successfully resumed. Clear state that no longer makes sense,
3835 and mark the LWP as running.  We must not do this before resuming;
3836 otherwise, if that fails, other code will be confused.  E.g., we'd
3837 later try to stop the LWP and hang forever waiting for a stop
3838 status. Note that we must not throw after this is cleared,
3839 otherwise handle_zombie_lwp_error would get confused. */
3840 lwp->stopped = 0;
3841 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3842 }
3843
3844 /* Called when we try to resume a stopped LWP and that errors out. If
3845 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
3846 or about to become one), discard the error, clear any pending status
3847 the LWP may have, and return true (we'll collect the exit status
3848 soon enough). Otherwise, return false. */
3849
3850 static int
3851 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3852 {
3853 struct thread_info *thread = get_lwp_thread (lp);
3854
3855 /* If we get an error after resuming the LWP successfully, we'd
3856 confuse !T state for the LWP being gone. */
3857 gdb_assert (lp->stopped);
3858
3859 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3860 because even if ptrace failed with ESRCH, the tracee may be "not
3861 yet fully dead", but already refusing ptrace requests. In that
3862 case the tracee has 'R (Running)' state for a little bit
3863 (observed in Linux 3.18). See also the note on ESRCH in the
3864 ptrace(2) man page. Instead, check whether the LWP has any state
3865 other than ptrace-stopped. */
3866
3867 /* Don't assume anything if /proc/PID/status can't be read. */
3868 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3869 {
3870 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3871 lp->status_pending_p = 0;
3872 return 1;
3873 }
3874 return 0;
3875 }
3876
3877 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3878 disappears while we try to resume it. */
3879
3880 static void
3881 linux_resume_one_lwp (struct lwp_info *lwp,
3882 int step, int signal, siginfo_t *info)
3883 {
3884 TRY
3885 {
3886 linux_resume_one_lwp_throw (lwp, step, signal, info);
3887 }
3888 CATCH (ex, RETURN_MASK_ERROR)
3889 {
3890 if (!check_ptrace_stopped_lwp_gone (lwp))
3891 throw_exception (ex);
3892 }
3893 END_CATCH
3894 }
3895
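/* Used to pass GDB's resume request array to the
   linux_set_resume_request callback through find_inferior.  */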
3896 struct thread_resume_array
3897 {
3898 struct thread_resume *resume;
3899 size_t n;
3900 };
3901
3902 /* This function is called once per thread via find_inferior.
3903 ARG is a pointer to a thread_resume_array struct.
3904 We look up the resume request in ARG that applies to the thread
3905 ENTRY, and mark the thread with a pointer to it.
3906
3907 This algorithm is O(threads * resume elements), but the number of
3908 resume elements is small (and will remain small at least until GDB
3909 supports thread suspension). */
3910
3911 static int
3912 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3913 {
3914 struct thread_info *thread = (struct thread_info *) entry;
3915 struct lwp_info *lwp = get_thread_lwp (thread);
3916 int ndx;
3917 struct thread_resume_array *r;
3918
3919 r = arg;
3920
3921 for (ndx = 0; ndx < r->n; ndx++)
3922 {
3923 ptid_t ptid = r->resume[ndx].thread;
3924 if (ptid_equal (ptid, minus_one_ptid)
3925 || ptid_equal (ptid, entry->id)
3926 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3927 of PID'. */
3928 || (ptid_get_pid (ptid) == pid_of (thread)
3929 && (ptid_is_pid (ptid)
3930 || ptid_get_lwp (ptid) == -1)))
3931 {
3932 if (r->resume[ndx].kind == resume_stop
3933 && thread->last_resume_kind == resume_stop)
3934 {
3935 if (debug_threads)
3936 debug_printf ("already %s LWP %ld at GDB's request\n",
3937 (thread->last_status.kind
3938 == TARGET_WAITKIND_STOPPED)
3939 ? "stopped"
3940 : "stopping",
3941 lwpid_of (thread));
3942
3943 continue;
3944 }
3945
3946 lwp->resume = &r->resume[ndx];
3947 thread->last_resume_kind = lwp->resume->kind;
3948
3949 lwp->step_range_start = lwp->resume->step_range_start;
3950 lwp->step_range_end = lwp->resume->step_range_end;
3951
3952 /* If we had a deferred signal to report, dequeue one now.
3953 This can happen if LWP gets more than one signal while
3954 trying to get out of a jump pad. */
3955 if (lwp->stopped
3956 && !lwp->status_pending_p
3957 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3958 {
3959 lwp->status_pending_p = 1;
3960
3961 if (debug_threads)
3962 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3963 "leaving status pending.\n",
3964 WSTOPSIG (lwp->status_pending),
3965 lwpid_of (thread));
3966 }
3967
3968 return 0;
3969 }
3970 }
3971
3972 /* No resume action for this thread. */
3973 lwp->resume = NULL;
3974
3975 return 0;
3976 }
3977
3978 /* find_inferior callback for linux_resume.
3979 Set *FLAG_P if this lwp has an interesting status pending. */
3980
3981 static int
3982 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3983 {
3984 struct thread_info *thread = (struct thread_info *) entry;
3985 struct lwp_info *lwp = get_thread_lwp (thread);
3986
3987 /* LWPs which will not be resumed are not interesting, because
3988 we might not wait for them next time through linux_wait. */
3989 if (lwp->resume == NULL)
3990 return 0;
3991
3992 if (thread_still_has_status_pending_p (thread))
3993 * (int *) flag_p = 1;
3994
3995 return 0;
3996 }
3997
3998 /* Return 1 if this lwp that GDB wants running is stopped at an
3999 internal breakpoint that we need to step over. It assumes that any
4000 required STOP_PC adjustment has already been propagated to the
4001 inferior's regcache. */
4002
4003 static int
4004 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4005 {
4006 struct thread_info *thread = (struct thread_info *) entry;
4007 struct lwp_info *lwp = get_thread_lwp (thread);
4008 struct thread_info *saved_thread;
4009 CORE_ADDR pc;
4010
4011 /* LWPs which will not be resumed are not interesting, because we
4012 might not wait for them next time through linux_wait. */
4013
4014 if (!lwp->stopped)
4015 {
4016 if (debug_threads)
4017 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4018 lwpid_of (thread));
4019 return 0;
4020 }
4021
4022 if (thread->last_resume_kind == resume_stop)
4023 {
4024 if (debug_threads)
4025 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4026 " stopped\n",
4027 lwpid_of (thread));
4028 return 0;
4029 }
4030
4031 gdb_assert (lwp->suspended >= 0);
4032
4033 if (lwp->suspended)
4034 {
4035 if (debug_threads)
4036 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4037 lwpid_of (thread));
4038 return 0;
4039 }
4040
4041 if (!lwp->need_step_over)
4042 {
4043 if (debug_threads)
4044 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4045 }
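/* Note that there is no early return here: even with
   NEED_STEP_OVER clear, we still check below whether a breakpoint
   is inserted at PC.  */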
4046
4047 if (lwp->status_pending_p)
4048 {
4049 if (debug_threads)
4050 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4051 " status.\n",
4052 lwpid_of (thread));
4053 return 0;
4054 }
4055
4056 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4057 or we have. */
4058 pc = get_pc (lwp);
4059
4060 /* If the PC has changed since we stopped, then don't do anything,
4061 and let the breakpoint/tracepoint be hit. This happens if, for
4062 instance, GDB handled the decr_pc_after_break subtraction itself,
4063 GDB is out-of-line (OOL) stepping this thread, or the user has
4064 issued a "jump" command, or poked the thread's registers herself. */
4065 if (pc != lwp->stop_pc)
4066 {
4067 if (debug_threads)
4068 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4069 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4070 lwpid_of (thread),
4071 paddress (lwp->stop_pc), paddress (pc));
4072
4073 lwp->need_step_over = 0;
4074 return 0;
4075 }
4076
4077 saved_thread = current_thread;
4078 current_thread = thread;
4079
4080 /* We can only step over breakpoints we know about. */
4081 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4082 {
4083 /* Don't step over a breakpoint that GDB expects to hit,
4084 though. If the condition is being evaluated on the target's side
4085 and it evaluates to false, step over this breakpoint as well. */
4086 if (gdb_breakpoint_here (pc)
4087 && gdb_condition_true_at_breakpoint (pc)
4088 && gdb_no_commands_at_breakpoint (pc))
4089 {
4090 if (debug_threads)
4091 debug_printf ("Need step over [LWP %ld]? yes, but found"
4092 " GDB breakpoint at 0x%s; skipping step over\n",
4093 lwpid_of (thread), paddress (pc));
4094
4095 current_thread = saved_thread;
4096 return 0;
4097 }
4098 else
4099 {
4100 if (debug_threads)
4101 debug_printf ("Need step over [LWP %ld]? yes, "
4102 "found breakpoint at 0x%s\n",
4103 lwpid_of (thread), paddress (pc));
4104
4105 /* We've found an lwp that needs stepping over --- return 1 so
4106 that find_inferior stops looking. */
4107 current_thread = saved_thread;
4108
4109 /* If the step over is cancelled, this is set again. */
4110 lwp->need_step_over = 0;
4111 return 1;
4112 }
4113 }
4114
4115 current_thread = saved_thread;
4116
4117 if (debug_threads)
4118 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4119 " at 0x%s\n",
4120 lwpid_of (thread), paddress (pc));
4121
4122 return 0;
4123 }
4124
4125 /* Start a step-over operation on LWP. When LWP is stopped at a
4126 breakpoint, we need to move the breakpoint out of the way to make
4127 progress. If we let other threads run while we do that, they may
4128 pass by the breakpoint location and miss hitting it. To avoid
4129 that, a step-over momentarily stops all threads while LWP is
4130 single-stepped with the breakpoint temporarily uninserted from
4131 the inferior. When the single-step finishes, we reinsert the
4132 breakpoint and let all threads that are supposed to be running
4133 run again.
4134
4135 On targets that don't support hardware single-step, we don't
4136 currently support full software single-stepping. Instead, we only
4137 support stepping over the thread event breakpoint, by asking the
4138 low target where to place a reinsert breakpoint. Since this
4139 routine assumes the breakpoint being stepped over is a thread event
4140 breakpoint, the return address of the current function is usually
4141 a good enough place to set the reinsert breakpoint. */
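/* In outline: stop_all_lwps pauses every thread; the breakpoint at
   PC is uninserted and LWP alone is resumed, either hardware
   single-stepped or run to a reinsert breakpoint; when the step
   finishes, finish_step_over reinserts the breakpoint, and the
   other threads are set running again (see unstop_all_lwps).  */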
4142
4143 static int
4144 start_step_over (struct lwp_info *lwp)
4145 {
4146 struct thread_info *thread = get_lwp_thread (lwp);
4147 struct thread_info *saved_thread;
4148 CORE_ADDR pc;
4149 int step;
4150
4151 if (debug_threads)
4152 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4153 lwpid_of (thread));
4154
4155 stop_all_lwps (1, lwp);
4156 gdb_assert (lwp->suspended == 0);
4157
4158 if (debug_threads)
4159 debug_printf ("Done stopping all threads for step-over.\n");
4160
4161 /* Note, we should always reach here with an already adjusted PC,
4162 either by GDB (if we're resuming due to GDB's request), or by our
4163 caller, if we just finished handling an internal breakpoint GDB
4164 shouldn't care about. */
4165 pc = get_pc (lwp);
4166
4167 saved_thread = current_thread;
4168 current_thread = thread;
4169
4170 lwp->bp_reinsert = pc;
4171 uninsert_breakpoints_at (pc);
4172 uninsert_fast_tracepoint_jumps_at (pc);
4173
4174 if (can_hardware_single_step ())
4175 {
4176 step = 1;
4177 }
4178 else
4179 {
4180 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4181 set_reinsert_breakpoint (raddr);
4182 step = 0;
4183 }
4184
4185 current_thread = saved_thread;
4186
4187 linux_resume_one_lwp (lwp, step, 0, NULL);
4188
4189 /* Require next event from this LWP. */
4190 step_over_bkpt = thread->entry.id;
4191 return 1;
4192 }
4193
4194 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4195 start_step_over, if still there, and delete any reinsert
4196 breakpoints we've set, on targets without hardware single-step. */
4197
4198 static int
4199 finish_step_over (struct lwp_info *lwp)
4200 {
4201 if (lwp->bp_reinsert != 0)
4202 {
4203 if (debug_threads)
4204 debug_printf ("Finished step over.\n");
4205
4206 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4207 may be no breakpoint to reinsert there by now. */
4208 reinsert_breakpoints_at (lwp->bp_reinsert);
4209 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4210
4211 lwp->bp_reinsert = 0;
4212
4213 /* Delete any software-single-step reinsert breakpoints. No
4214 longer needed. We don't have to worry about other threads
4215 hitting this trap, and later not being able to explain it,
4216 because we were stepping over a breakpoint, and we hold all
4217 threads but LWP stopped while doing that. */
4218 if (!can_hardware_single_step ())
4219 delete_reinsert_breakpoints ();
4220
4221 step_over_bkpt = null_ptid;
4222 return 1;
4223 }
4224 else
4225 return 0;
4226 }
4227
4228 /* This function is called once per thread. We check the thread's resume
4229 request, which will tell us whether to resume, step, or leave the thread
4230 stopped; and what signal, if any, it should be sent.
4231
4232 For threads which we aren't explicitly told otherwise, we preserve
4233 the stepping flag; this is used for stepping over gdbserver-placed
4234 breakpoints.
4235
4236 If pending_flags was set in any thread, we queue any needed
4237 signals, since we won't actually resume. We already have a pending
4238 event to report, so we don't need to preserve any step requests;
4239 they should be re-issued if necessary. */
4240
4241 static int
4242 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4243 {
4244 struct thread_info *thread = (struct thread_info *) entry;
4245 struct lwp_info *lwp = get_thread_lwp (thread);
4246 int step;
4247 int leave_all_stopped = * (int *) arg;
4248 int leave_pending;
4249
4250 if (lwp->resume == NULL)
4251 return 0;
4252
4253 if (lwp->resume->kind == resume_stop)
4254 {
4255 if (debug_threads)
4256 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4257
4258 if (!lwp->stopped)
4259 {
4260 if (debug_threads)
4261 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4262
4263 /* Stop the thread, and wait for the event asynchronously,
4264 through the event loop. */
4265 send_sigstop (lwp);
4266 }
4267 else
4268 {
4269 if (debug_threads)
4270 debug_printf ("already stopped LWP %ld\n",
4271 lwpid_of (thread));
4272
4273 /* The LWP may have been stopped in an internal event that
4274 was not meant to be notified back to GDB (e.g., gdbserver
4275 breakpoint), so we should be reporting a stop event in
4276 this case too. */
4277
4278 /* If the thread already has a pending SIGSTOP, this is a
4279 no-op. Otherwise, something later will presumably resume
4280 the thread and this will cause it to cancel any pending
4281 operation, due to last_resume_kind == resume_stop. If
4282 the thread already has a pending status to report, we
4283 will still report it the next time we wait - see
4284 status_pending_p_callback. */
4285
4286 /* If we already have a pending signal to report, then
4287 there's no need to queue a SIGSTOP, as this means we're
4288 midway through moving the LWP out of the jumppad, and we
4289 will report the pending signal as soon as that is
4290 finished. */
4291 if (lwp->pending_signals_to_report == NULL)
4292 send_sigstop (lwp);
4293 }
4294
4295 /* For stop requests, we're done. */
4296 lwp->resume = NULL;
4297 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4298 return 0;
4299 }
4300
4301 /* If this thread, which is about to be resumed, has a pending status,
4302 then don't resume any threads - we can just report the pending
4303 status. Make sure to queue any signals that would otherwise be
4304 sent. In all-stop mode, we base this decision on whether *any*
4305 thread has a pending status. If there's a thread that needs the
4306 step-over-breakpoint dance, then don't resume any other thread
4307 but that particular one. */
4308 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4309
4310 if (!leave_pending)
4311 {
4312 if (debug_threads)
4313 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4314
4315 step = (lwp->resume->kind == resume_step);
4316 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4317 }
4318 else
4319 {
4320 if (debug_threads)
4321 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4322
4323 /* If we have a new signal, enqueue the signal. */
4324 if (lwp->resume->sig != 0)
4325 {
4326 struct pending_signals *p_sig;
4327 p_sig = xmalloc (sizeof (*p_sig));
4328 p_sig->prev = lwp->pending_signals;
4329 p_sig->signal = lwp->resume->sig;
4330 memset (&p_sig->info, 0, sizeof (siginfo_t));
4331
4332 /* If this is the same signal we were previously stopped by,
4333 make sure to queue its siginfo. We can ignore the return
4334 value of ptrace; if it fails, we'll skip
4335 PTRACE_SETSIGINFO. */
4336 if (WIFSTOPPED (lwp->last_status)
4337 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4338 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4339 &p_sig->info);
4340
4341 lwp->pending_signals = p_sig;
4342 }
4343 }
4344
4345 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4346 lwp->resume = NULL;
4347 return 0;
4348 }
4349
4350 static void
4351 linux_resume (struct thread_resume *resume_info, size_t n)
4352 {
4353 struct thread_resume_array array = { resume_info, n };
4354 struct thread_info *need_step_over = NULL;
4355 int any_pending;
4356 int leave_all_stopped;
4357
4358 if (debug_threads)
4359 {
4360 debug_enter ();
4361 debug_printf ("linux_resume:\n");
4362 }
4363
4364 find_inferior (&all_threads, linux_set_resume_request, &array);
4365
4366 /* If there is a thread which would otherwise be resumed, which has
4367 a pending status, then don't resume any threads - we can just
4368 report the pending status. Make sure to queue any signals that
4369 would otherwise be sent. In non-stop mode, we'll apply this
4370 logic to each thread individually. We consume all pending events
4371 before considering starting a step-over (in all-stop). */
4372 any_pending = 0;
4373 if (!non_stop)
4374 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4375
4376 /* If there is a thread which would otherwise be resumed, which is
4377 stopped at a breakpoint that needs stepping over, then don't
4378 resume any threads - have it step over the breakpoint with all
4379 other threads stopped, then resume all threads again. Make sure
4380 to queue any signals that would otherwise be delivered or
4381 queued. */
4382 if (!any_pending && supports_breakpoints ())
4383 need_step_over
4384 = (struct thread_info *) find_inferior (&all_threads,
4385 need_step_over_p, NULL);
4386
4387 leave_all_stopped = (need_step_over != NULL || any_pending);
4388
4389 if (debug_threads)
4390 {
4391 if (need_step_over != NULL)
4392 debug_printf ("Not resuming all, need step over\n");
4393 else if (any_pending)
4394 debug_printf ("Not resuming, all-stop and found "
4395 "an LWP with pending status\n");
4396 else
4397 debug_printf ("Resuming, no pending status or step over needed\n");
4398 }
4399
4400 /* Even if we're leaving threads stopped, queue all signals we'd
4401 otherwise deliver. */
4402 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4403
4404 if (need_step_over)
4405 start_step_over (get_thread_lwp (need_step_over));
4406
4407 if (debug_threads)
4408 {
4409 debug_printf ("linux_resume done\n");
4410 debug_exit ();
4411 }
4412 }
4413
4414 /* This function is called once per thread. We check the thread's
4415 last resume request, which will tell us whether to resume, step, or
4416 leave the thread stopped. Any signal the client requested to be
4417 delivered has already been enqueued at this point.
4418
4419 If any thread that GDB wants running is stopped at an internal
4420 breakpoint that needs stepping over, we start a step-over operation
4421 on that particular thread, and leave all others stopped. */
4422
4423 static int
4424 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4425 {
4426 struct thread_info *thread = (struct thread_info *) entry;
4427 struct lwp_info *lwp = get_thread_lwp (thread);
4428 int step;
4429
4430 if (lwp == except)
4431 return 0;
4432
4433 if (debug_threads)
4434 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4435
4436 if (!lwp->stopped)
4437 {
4438 if (debug_threads)
4439 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4440 return 0;
4441 }
4442
4443 if (thread->last_resume_kind == resume_stop
4444 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4445 {
4446 if (debug_threads)
4447 debug_printf (" client wants LWP to remain %ld stopped\n",
4448 lwpid_of (thread));
4449 return 0;
4450 }
4451
4452 if (lwp->status_pending_p)
4453 {
4454 if (debug_threads)
4455 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4456 lwpid_of (thread));
4457 return 0;
4458 }
4459
4460 gdb_assert (lwp->suspended >= 0);
4461
4462 if (lwp->suspended)
4463 {
4464 if (debug_threads)
4465 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4466 return 0;
4467 }
4468
4469 if (thread->last_resume_kind == resume_stop
4470 && lwp->pending_signals_to_report == NULL
4471 && lwp->collecting_fast_tracepoint == 0)
4472 {
4473 /* We haven't reported this LWP as stopped yet (otherwise, the
4474 last_status.kind check above would catch it, and we wouldn't
4475 reach here). This LWP may have been momentarily paused by a
4476 stop_all_lwps call while handling, for example, another LWP's
4477 step-over. In that case, the pending expected SIGSTOP signal
4478 that was queued at vCont;t handling time will have already
4479 been consumed by wait_for_sigstop, and so we need to requeue
4480 another one here. Note that if the LWP already has a SIGSTOP
4481 pending, this is a no-op. */
4482
4483 if (debug_threads)
4484 debug_printf ("Client wants LWP %ld to stop. "
4485 "Making sure it has a SIGSTOP pending\n",
4486 lwpid_of (thread));
4487
4488 send_sigstop (lwp);
4489 }
4490
4491 step = thread->last_resume_kind == resume_step;
4492 linux_resume_one_lwp (lwp, step, 0, NULL);
4493 return 0;
4494 }
4495
4496 static int
4497 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4498 {
4499 struct thread_info *thread = (struct thread_info *) entry;
4500 struct lwp_info *lwp = get_thread_lwp (thread);
4501
4502 if (lwp == except)
4503 return 0;
4504
4505 lwp->suspended--;
4506 gdb_assert (lwp->suspended >= 0);
4507
4508 return proceed_one_lwp (entry, except);
4509 }
4510
4511 /* When we finish a step-over, set threads running again. If there's
4512 another thread that may need a step-over, now's the time to start
4513 it. Eventually, we'll move all threads past their breakpoints. */
4514
4515 static void
4516 proceed_all_lwps (void)
4517 {
4518 struct thread_info *need_step_over;
4519
4520 /* If there is a thread which would otherwise be resumed, which is
4521 stopped at a breakpoint that needs stepping over, then don't
4522 resume any threads - have it step over the breakpoint with all
4523 other threads stopped, then resume all threads again. */
4524
4525 if (supports_breakpoints ())
4526 {
4527 need_step_over
4528 = (struct thread_info *) find_inferior (&all_threads,
4529 need_step_over_p, NULL);
4530
4531 if (need_step_over != NULL)
4532 {
4533 if (debug_threads)
4534 debug_printf ("proceed_all_lwps: found "
4535 "thread %ld needing a step-over\n",
4536 lwpid_of (need_step_over));
4537
4538 start_step_over (get_thread_lwp (need_step_over));
4539 return;
4540 }
4541 }
4542
4543 if (debug_threads)
4544 debug_printf ("Proceeding, no step-over needed\n");
4545
4546 find_inferior (&all_threads, proceed_one_lwp, NULL);
4547 }
4548
4549 /* Stopped LWPs that the client wanted to be running, and that don't
4550 have pending statuses, are set to run again, except for EXCEPT, if
4551 not NULL. This undoes a stop_all_lwps call. */
4552
4553 static void
4554 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4555 {
4556 if (debug_threads)
4557 {
4558 debug_enter ();
4559 if (except)
4560 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4561 lwpid_of (get_lwp_thread (except)));
4562 else
4563 debug_printf ("unstopping all lwps\n");
4564 }
4565
4566 if (unsuspend)
4567 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4568 else
4569 find_inferior (&all_threads, proceed_one_lwp, except);
4570
4571 if (debug_threads)
4572 {
4573 debug_printf ("unstop_all_lwps done\n");
4574 debug_exit ();
4575 }
4576 }
4577
4578
4579 #ifdef HAVE_LINUX_REGSETS
4580
4581 #define use_linux_regsets 1
4582
4583 /* Returns true if REGSET has been disabled. */
4584
4585 static int
4586 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4587 {
4588 return (info->disabled_regsets != NULL
4589 && info->disabled_regsets[regset - info->regsets]);
4590 }
4591
4592 /* Disable REGSET. */
4593
4594 static void
4595 disable_regset (struct regsets_info *info, struct regset_info *regset)
4596 {
4597 int dr_offset;
4598
4599 dr_offset = regset - info->regsets;
4600 if (info->disabled_regsets == NULL)
4601 info->disabled_regsets = xcalloc (1, info->num_regsets);
4602 info->disabled_regsets[dr_offset] = 1;
4603 }
4604
4605 static int
4606 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4607 struct regcache *regcache)
4608 {
4609 struct regset_info *regset;
4610 int saw_general_regs = 0;
4611 int pid;
4612 struct iovec iov;
4613
4614 pid = lwpid_of (current_thread);
4615 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4616 {
4617 void *buf, *data;
4618 int nt_type, res;
4619
4620 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4621 continue;
4622
4623 buf = xmalloc (regset->size);
4624
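/* Regsets with an NT_* note type use the PTRACE_GETREGSET
   interface, which expects the note type as the third ptrace
   argument and a struct iovec describing the buffer as the fourth;
   older regsets pass the buffer directly.  */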
4625 nt_type = regset->nt_type;
4626 if (nt_type)
4627 {
4628 iov.iov_base = buf;
4629 iov.iov_len = regset->size;
4630 data = (void *) &iov;
4631 }
4632 else
4633 data = buf;
4634
4635 #ifndef __sparc__
4636 res = ptrace (regset->get_request, pid,
4637 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4638 #else
4639 res = ptrace (regset->get_request, pid, data, nt_type);
4640 #endif
4641 if (res < 0)
4642 {
4643 if (errno == EIO)
4644 {
4645 /* If we get EIO on a regset, do not try it again for
4646 this process mode. */
4647 disable_regset (regsets_info, regset);
4648 }
4649 else if (errno == ENODATA)
4650 {
4651 /* ENODATA may be returned if the regset is currently
4652 not "active". This can happen in normal operation,
4653 so suppress the warning in this case. */
4654 }
4655 else
4656 {
4657 char s[256];
4658 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4659 pid);
4660 perror (s);
4661 }
4662 }
4663 else
4664 {
4665 if (regset->type == GENERAL_REGS)
4666 saw_general_regs = 1;
4667 regset->store_function (regcache, buf);
4668 }
4669 free (buf);
4670 }
4671 if (saw_general_regs)
4672 return 0;
4673 else
4674 return 1;
4675 }
4676
4677 static int
4678 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4679 struct regcache *regcache)
4680 {
4681 struct regset_info *regset;
4682 int saw_general_regs = 0;
4683 int pid;
4684 struct iovec iov;
4685
4686 pid = lwpid_of (current_thread);
4687 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4688 {
4689 void *buf, *data;
4690 int nt_type, res;
4691
4692 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4693 || regset->fill_function == NULL)
4694 continue;
4695
4696 buf = xmalloc (regset->size);
4697
4698 /* First fill the buffer with the current register set contents,
4699 in case there are any items in the kernel's regset that are
4700 not in gdbserver's regcache. */
4701
4702 nt_type = regset->nt_type;
4703 if (nt_type)
4704 {
4705 iov.iov_base = buf;
4706 iov.iov_len = regset->size;
4707 data = (void *) &iov;
4708 }
4709 else
4710 data = buf;
4711
4712 #ifndef __sparc__
4713 res = ptrace (regset->get_request, pid,
4714 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4715 #else
4716 res = ptrace (regset->get_request, pid, data, nt_type);
4717 #endif
4718
4719 if (res == 0)
4720 {
4721 /* Then overlay our cached registers on that. */
4722 regset->fill_function (regcache, buf);
4723
4724 /* Only now do we write the register set. */
4725 #ifndef __sparc__
4726 res = ptrace (regset->set_request, pid,
4727 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4728 #else
4729 res = ptrace (regset->set_request, pid, data, nt_type);
4730 #endif
4731 }
4732
4733 if (res < 0)
4734 {
4735 if (errno == EIO)
4736 {
4737 /* If we get EIO on a regset, do not try it again for
4738 this process mode. */
4739 disable_regset (regsets_info, regset);
4740 }
4741 else if (errno == ESRCH)
4742 {
4743 /* At this point, ESRCH should mean the process is
4744 already gone, in which case we simply ignore attempts
4745 to change its registers. See also the related
4746 comment in linux_resume_one_lwp. */
4747 free (buf);
4748 return 0;
4749 }
4750 else
4751 {
4752 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4753 }
4754 }
4755 else if (regset->type == GENERAL_REGS)
4756 saw_general_regs = 1;
4757 free (buf);
4758 }
4759 if (saw_general_regs)
4760 return 0;
4761 else
4762 return 1;
4763 }
4764
4765 #else /* !HAVE_LINUX_REGSETS */
4766
4767 #define use_linux_regsets 0
4768 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4769 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4770
4771 #endif
4772
4773 /* Return 1 if register REGNO is supported by one of the regset ptrace
4774 calls or 0 if it has to be transferred individually. */
4775
4776 static int
4777 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4778 {
4779 unsigned char mask = 1 << (regno % 8);
4780 size_t index = regno / 8;
4781
4782 return (use_linux_regsets
4783 && (regs_info->regset_bitmap == NULL
4784 || (regs_info->regset_bitmap[index] & mask) != 0));
4785 }
4786
4787 #ifdef HAVE_LINUX_USRREGS
4788
4789 int
4790 register_addr (const struct usrregs_info *usrregs, int regnum)
4791 {
4792 int addr;
4793
4794 if (regnum < 0 || regnum >= usrregs->num_regs)
4795 error ("Invalid register number %d.", regnum);
4796
4797 addr = usrregs->regmap[regnum];
4798
4799 return addr;
4800 }
4801
4802 /* Fetch one register. */
4803 static void
4804 fetch_register (const struct usrregs_info *usrregs,
4805 struct regcache *regcache, int regno)
4806 {
4807 CORE_ADDR regaddr;
4808 int i, size;
4809 char *buf;
4810 int pid;
4811
4812 if (regno >= usrregs->num_regs)
4813 return;
4814 if ((*the_low_target.cannot_fetch_register) (regno))
4815 return;
4816
4817 regaddr = register_addr (usrregs, regno);
4818 if (regaddr == -1)
4819 return;
4820
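/* Round the register size up to a whole number of ptrace transfer
   units, since PTRACE_PEEKUSER reads one word at a time.  */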
4821 size = ((register_size (regcache->tdesc, regno)
4822 + sizeof (PTRACE_XFER_TYPE) - 1)
4823 & -sizeof (PTRACE_XFER_TYPE));
4824 buf = alloca (size);
4825
4826 pid = lwpid_of (current_thread);
4827 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4828 {
4829 errno = 0;
4830 *(PTRACE_XFER_TYPE *) (buf + i) =
4831 ptrace (PTRACE_PEEKUSER, pid,
4832 /* Coerce to a uintptr_t first to avoid potential gcc warning
4833 of coercing an 8 byte integer to a 4 byte pointer. */
4834 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4835 regaddr += sizeof (PTRACE_XFER_TYPE);
4836 if (errno != 0)
4837 error ("reading register %d: %s", regno, strerror (errno));
4838 }
4839
4840 if (the_low_target.supply_ptrace_register)
4841 the_low_target.supply_ptrace_register (regcache, regno, buf);
4842 else
4843 supply_register (regcache, regno, buf);
4844 }
4845
4846 /* Store one register. */
4847 static void
4848 store_register (const struct usrregs_info *usrregs,
4849 struct regcache *regcache, int regno)
4850 {
4851 CORE_ADDR regaddr;
4852 int i, size;
4853 char *buf;
4854 int pid;
4855
4856 if (regno >= usrregs->num_regs)
4857 return;
4858 if ((*the_low_target.cannot_store_register) (regno))
4859 return;
4860
4861 regaddr = register_addr (usrregs, regno);
4862 if (regaddr == -1)
4863 return;
4864
4865 size = ((register_size (regcache->tdesc, regno)
4866 + sizeof (PTRACE_XFER_TYPE) - 1)
4867 & -sizeof (PTRACE_XFER_TYPE));
4868 buf = alloca (size);
4869 memset (buf, 0, size);
4870
4871 if (the_low_target.collect_ptrace_register)
4872 the_low_target.collect_ptrace_register (regcache, regno, buf);
4873 else
4874 collect_register (regcache, regno, buf);
4875
4876 pid = lwpid_of (current_thread);
4877 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4878 {
4879 errno = 0;
4880 ptrace (PTRACE_POKEUSER, pid,
4881 /* Coerce to a uintptr_t first to avoid potential gcc warning
4882 about coercing an 8 byte integer to a 4 byte pointer. */
4883 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4884 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4885 if (errno != 0)
4886 {
4887 /* At this point, ESRCH should mean the process is
4888 already gone, in which case we simply ignore attempts
4889 to change its registers. See also the related
4890 comment in linux_resume_one_lwp. */
4891 if (errno == ESRCH)
4892 return;
4893
4894 if ((*the_low_target.cannot_store_register) (regno) == 0)
4895 error ("writing register %d: %s", regno, strerror (errno));
4896 }
4897 regaddr += sizeof (PTRACE_XFER_TYPE);
4898 }
4899 }
4900
4901 /* Fetch all registers, or just one, from the child process.
4902 If REGNO is -1, do this for all registers, skipping any that are
4903 assumed to have been retrieved by regsets_fetch_inferior_registers,
4904 unless ALL is non-zero.
4905 Otherwise, REGNO specifies which register (so we can save time). */
4906 static void
4907 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4908 struct regcache *regcache, int regno, int all)
4909 {
4910 struct usrregs_info *usr = regs_info->usrregs;
4911
4912 if (regno == -1)
4913 {
4914 for (regno = 0; regno < usr->num_regs; regno++)
4915 if (all || !linux_register_in_regsets (regs_info, regno))
4916 fetch_register (usr, regcache, regno);
4917 }
4918 else
4919 fetch_register (usr, regcache, regno);
4920 }
4921
4922 /* Store our register values back into the inferior.
4923 If REGNO is -1, do this for all registers, skipping any that are
4924 assumed to have been saved by regsets_store_inferior_registers,
4925 unless ALL is non-zero.
4926 Otherwise, REGNO specifies which register (so we can save time). */
4927 static void
4928 usr_store_inferior_registers (const struct regs_info *regs_info,
4929 struct regcache *regcache, int regno, int all)
4930 {
4931 struct usrregs_info *usr = regs_info->usrregs;
4932
4933 if (regno == -1)
4934 {
4935 for (regno = 0; regno < usr->num_regs; regno++)
4936 if (all || !linux_register_in_regsets (regs_info, regno))
4937 store_register (usr, regcache, regno);
4938 }
4939 else
4940 store_register (usr, regcache, regno);
4941 }
4942
4943 #else /* !HAVE_LINUX_USRREGS */
4944
4945 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4946 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4947
4948 #endif
4949
4950
4951 void
4952 linux_fetch_registers (struct regcache *regcache, int regno)
4953 {
4954 int use_regsets;
4955 int all = 0;
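/* ALL becomes nonzero if the regsets did not provide the general
   registers; in that case every register is transferred through
   the usrregs interface, not just those missing from the
   regsets.  */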
4956 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4957
4958 if (regno == -1)
4959 {
4960 if (the_low_target.fetch_register != NULL
4961 && regs_info->usrregs != NULL)
4962 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4963 (*the_low_target.fetch_register) (regcache, regno);
4964
4965 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4966 if (regs_info->usrregs != NULL)
4967 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4968 }
4969 else
4970 {
4971 if (the_low_target.fetch_register != NULL
4972 && (*the_low_target.fetch_register) (regcache, regno))
4973 return;
4974
4975 use_regsets = linux_register_in_regsets (regs_info, regno);
4976 if (use_regsets)
4977 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4978 regcache);
4979 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4980 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4981 }
4982 }
4983
4984 void
4985 linux_store_registers (struct regcache *regcache, int regno)
4986 {
4987 int use_regsets;
4988 int all = 0;
4989 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4990
4991 if (regno == -1)
4992 {
4993 all = regsets_store_inferior_registers (regs_info->regsets_info,
4994 regcache);
4995 if (regs_info->usrregs != NULL)
4996 usr_store_inferior_registers (regs_info, regcache, regno, all);
4997 }
4998 else
4999 {
5000 use_regsets = linux_register_in_regsets (regs_info, regno);
5001 if (use_regsets)
5002 all = regsets_store_inferior_registers (regs_info->regsets_info,
5003 regcache);
5004 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5005 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5006 }
5007 }
5008
5009
5010 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5011 to debugger memory starting at MYADDR. */
5012
5013 static int
5014 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5015 {
5016 int pid = lwpid_of (current_thread);
5017 register PTRACE_XFER_TYPE *buffer;
5018 register CORE_ADDR addr;
5019 register int count;
5020 char filename[64];
5021 register int i;
5022 int ret;
5023 int fd;
5024
5025 /* Try using /proc. Don't bother for one word. */
5026 if (len >= 3 * sizeof (long))
5027 {
5028 int bytes;
5029
5030 /* We could keep this file open and cache it - possibly one per
5031 thread. That requires some juggling, but is even faster. */
5032 sprintf (filename, "/proc/%d/mem", pid);
5033 fd = open (filename, O_RDONLY | O_LARGEFILE);
5034 if (fd == -1)
5035 goto no_proc;
5036
5037 /* If pread64 is available, use it. It's faster if the kernel
5038 supports it (only one syscall), and it's 64-bit safe even on
5039 32-bit platforms (for instance, SPARC debugging a SPARC64
5040 application). */
5041 #ifdef HAVE_PREAD64
5042 bytes = pread64 (fd, myaddr, len, memaddr);
5043 #else
5044 bytes = -1;
5045 if (lseek (fd, memaddr, SEEK_SET) != -1)
5046 bytes = read (fd, myaddr, len);
5047 #endif
5048
5049 close (fd);
5050 if (bytes == len)
5051 return 0;
5052
5053 /* Some data was read; we'll try to get the rest with ptrace. */
5054 if (bytes > 0)
5055 {
5056 memaddr += bytes;
5057 myaddr += bytes;
5058 len -= bytes;
5059 }
5060 }
5061
5062 no_proc:
5063 /* Round starting address down to longword boundary. */
5064 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5065 /* Round ending address up; get number of longwords that makes. */
5066 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5067 / sizeof (PTRACE_XFER_TYPE));
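/* For example, assuming an 8-byte PTRACE_XFER_TYPE, a 5-byte read
   at 0x1005 becomes a 2-longword read starting at 0x1000.  */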
5068 /* Allocate buffer of that many longwords. */
5069 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5070
5071 /* Read all the longwords */
5072 errno = 0;
5073 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5074 {
5075 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5076 about coercing an 8 byte integer to a 4 byte pointer. */
5077 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5078 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5079 (PTRACE_TYPE_ARG4) 0);
5080 if (errno)
5081 break;
5082 }
5083 ret = errno;
5084
5085 /* Copy appropriate bytes out of the buffer. */
5086 if (i > 0)
5087 {
5088 i *= sizeof (PTRACE_XFER_TYPE);
5089 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5090 memcpy (myaddr,
5091 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5092 i < len ? i : len);
5093 }
5094
5095 return ret;
5096 }
5097
5098 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5099 memory at MEMADDR. On failure (cannot write to the inferior)
5100 returns the value of errno. Always succeeds if LEN is zero. */
5101
5102 static int
5103 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5104 {
5105 register int i;
5106 /* Round starting address down to longword boundary. */
5107 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5108 /* Round ending address up; get number of longwords that makes. */
5109 register int count
5110 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5111 / sizeof (PTRACE_XFER_TYPE);
5112
5113 /* Allocate buffer of that many longwords. */
5114 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5115 alloca (count * sizeof (PTRACE_XFER_TYPE));
5116
5117 int pid = lwpid_of (current_thread);
5118
5119 if (len == 0)
5120 {
5121 /* Zero length write always succeeds. */
5122 return 0;
5123 }
5124
5125 if (debug_threads)
5126 {
5127 /* Dump up to four bytes. */
5128 unsigned int val = * (unsigned int *) myaddr;
5129 if (len == 1)
5130 val = val & 0xff;
5131 else if (len == 2)
5132 val = val & 0xffff;
5133 else if (len == 3)
5134 val = val & 0xffffff;
5135 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5136 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5137 }
5138
5139 /* Fill start and end extra bytes of buffer with existing memory data. */
5140
5141 errno = 0;
5142 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5143 about coercing an 8 byte integer to a 4 byte pointer. */
5144 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5145 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5146 (PTRACE_TYPE_ARG4) 0);
5147 if (errno)
5148 return errno;
5149
5150 if (count > 1)
5151 {
5152 errno = 0;
5153 buffer[count - 1]
5154 = ptrace (PTRACE_PEEKTEXT, pid,
5155 /* Coerce to a uintptr_t first to avoid potential gcc warning
5156 about coercing an 8 byte integer to a 4 byte pointer. */
5157 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5158 * sizeof (PTRACE_XFER_TYPE)),
5159 (PTRACE_TYPE_ARG4) 0);
5160 if (errno)
5161 return errno;
5162 }
5163
5164 /* Copy data to be written over corresponding part of buffer. */
5165
5166 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5167 myaddr, len);
5168
5169 /* Write the entire buffer. */
5170
5171 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5172 {
5173 errno = 0;
5174 ptrace (PTRACE_POKETEXT, pid,
5175 /* Coerce to a uintptr_t first to avoid potential gcc warning
5176 about coercing an 8 byte integer to a 4 byte pointer. */
5177 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5178 (PTRACE_TYPE_ARG4) buffer[i]);
5179 if (errno)
5180 return errno;
5181 }
5182
5183 return 0;
5184 }
5185
5186 static void
5187 linux_look_up_symbols (void)
5188 {
5189 #ifdef USE_THREAD_DB
5190 struct process_info *proc = current_process ();
5191
5192 if (proc->priv->thread_db != NULL)
5193 return;
5194
5195 /* If the kernel supports tracing clones, then we don't need to
5196 use the magic thread event breakpoint to learn about
5197 threads. */
5198 thread_db_init (!linux_supports_traceclone ());
5199 #endif
5200 }
5201
5202 static void
5203 linux_request_interrupt (void)
5204 {
5205 extern unsigned long signal_pid;
5206
5207 /* Send a SIGINT to the process group. This acts as if the user
5208 had typed a ^C on the controlling terminal. */
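/* (kill with a negative PID signals every process in the process
   group whose ID is -PID.)  */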
5209 kill (-signal_pid, SIGINT);
5210 }
5211
5212 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5213 to debugger memory starting at MYADDR. */
5214
5215 static int
5216 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5217 {
5218 char filename[PATH_MAX];
5219 int fd, n;
5220 int pid = lwpid_of (current_thread);
5221
5222 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5223
5224 fd = open (filename, O_RDONLY);
5225 if (fd < 0)
5226 return -1;
5227
5228 if (offset != (CORE_ADDR) 0
5229 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5230 n = -1;
5231 else
5232 n = read (fd, myaddr, len);
5233
5234 close (fd);
5235
5236 return n;
5237 }
5238
5239 /* These breakpoint- and watchpoint-related wrapper functions simply
5240 pass on the function call if the target has registered a
5241 corresponding function. */
5242
5243 static int
5244 linux_supports_z_point_type (char z_type)
5245 {
5246 return (the_low_target.supports_z_point_type != NULL
5247 && the_low_target.supports_z_point_type (z_type));
5248 }
5249
5250 static int
5251 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5252 int size, struct raw_breakpoint *bp)
5253 {
5254 if (type == raw_bkpt_type_sw)
5255 return insert_memory_breakpoint (bp);
5256 else if (the_low_target.insert_point != NULL)
5257 return the_low_target.insert_point (type, addr, size, bp);
5258 else
5259 /* Unsupported (see target.h). */
5260 return 1;
5261 }
5262
5263 static int
5264 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5265 int size, struct raw_breakpoint *bp)
5266 {
5267 if (type == raw_bkpt_type_sw)
5268 return remove_memory_breakpoint (bp);
5269 else if (the_low_target.remove_point != NULL)
5270 return the_low_target.remove_point (type, addr, size, bp);
5271 else
5272 /* Unsupported (see target.h). */
5273 return 1;
5274 }
5275
5276 /* Implement the to_stopped_by_sw_breakpoint target_ops
5277 method. */
5278
5279 static int
5280 linux_stopped_by_sw_breakpoint (void)
5281 {
5282 struct lwp_info *lwp = get_thread_lwp (current_thread);
5283
5284 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5285 }
5286
5287 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5288 method. */
5289
5290 static int
5291 linux_supports_stopped_by_sw_breakpoint (void)
5292 {
5293 return USE_SIGTRAP_SIGINFO;
5294 }
5295
5296 /* Implement the to_stopped_by_hw_breakpoint target_ops
5297 method. */
5298
5299 static int
5300 linux_stopped_by_hw_breakpoint (void)
5301 {
5302 struct lwp_info *lwp = get_thread_lwp (current_thread);
5303
5304 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5305 }
5306
5307 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5308 method. */
5309
5310 static int
5311 linux_supports_stopped_by_hw_breakpoint (void)
5312 {
5313 return USE_SIGTRAP_SIGINFO;
5314 }
5315
5316 /* Implement the supports_conditional_breakpoints target_ops
5317 method. */
5318
5319 static int
5320 linux_supports_conditional_breakpoints (void)
5321 {
5322 /* GDBserver needs to step over the breakpoint if the condition is
5323 false. GDBserver software single step is too simple, so disable
5324 conditional breakpoints if the target doesn't have hardware single
5325 step. */
5326 return can_hardware_single_step ();
5327 }
5328
5329 static int
5330 linux_stopped_by_watchpoint (void)
5331 {
5332 struct lwp_info *lwp = get_thread_lwp (current_thread);
5333
5334 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5335 }
5336
5337 static CORE_ADDR
5338 linux_stopped_data_address (void)
5339 {
5340 struct lwp_info *lwp = get_thread_lwp (current_thread);
5341
5342 return lwp->stopped_data_address;
5343 }
5344
5345 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5346 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5347 && defined(PT_TEXT_END_ADDR)
5348
5349 /* This is only used for targets that define PT_TEXT_ADDR,
5350 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5351 the target has different ways of acquiring this information, like
5352 loadmaps. */
5353
5354 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5355 to tell gdb about. */
5356
5357 static int
5358 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5359 {
5360 unsigned long text, text_end, data;
5361 int pid = lwpid_of (current_thread);
5362
5363 errno = 0;
5364
5365 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5366 (PTRACE_TYPE_ARG4) 0);
5367 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5368 (PTRACE_TYPE_ARG4) 0);
5369 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5370 (PTRACE_TYPE_ARG4) 0);
5371
5372 if (errno == 0)
5373 {
5374 /* Both text and data offsets produced at compile-time (and so
5375 used by gdb) are relative to the beginning of the program,
5376 with the data segment immediately following the text segment.
5377 However, the actual runtime layout in memory may put the data
5378 somewhere else, so when we send gdb a data base-address, we
5379 use the real data base address and subtract the compile-time
5380 data base-address from it (which is just the length of the
5381 text segment). BSS immediately follows data in both
5382 cases. */
5383 *text_p = text;
5384 *data_p = data - (text_end - text);
5385
5386 return 1;
5387 }
5388 return 0;
5389 }
5390 #endif
5391
5392 static int
5393 linux_qxfer_osdata (const char *annex,
5394 unsigned char *readbuf, unsigned const char *writebuf,
5395 CORE_ADDR offset, int len)
5396 {
5397 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5398 }
5399
5400 /* Convert a native/host siginfo object into/from the siginfo in the
5401 layout of the inferior's architecture. */
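/* DIRECTION == 1 converts the inferior-layout buffer INF_SIGINFO
   into the native SIGINFO object; DIRECTION == 0 converts the
   native object into the inferior layout (see the memcpy fallback
   below).  */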
5402
5403 static void
5404 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5405 {
5406 int done = 0;
5407
5408 if (the_low_target.siginfo_fixup != NULL)
5409 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5410
5411 /* If there was no callback, or the callback didn't do anything,
5412 then just do a straight memcpy. */
5413 if (!done)
5414 {
5415 if (direction == 1)
5416 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5417 else
5418 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5419 }
5420 }
5421
5422 static int
5423 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5424 unsigned const char *writebuf, CORE_ADDR offset, int len)
5425 {
5426 int pid;
5427 siginfo_t siginfo;
5428 char inf_siginfo[sizeof (siginfo_t)];
5429
5430 if (current_thread == NULL)
5431 return -1;
5432
5433 pid = lwpid_of (current_thread);
5434
5435 if (debug_threads)
5436 debug_printf ("%s siginfo for lwp %d.\n",
5437 readbuf != NULL ? "Reading" : "Writing",
5438 pid);
5439
5440 if (offset >= sizeof (siginfo))
5441 return -1;
5442
5443 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5444 return -1;
5445
5446 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5447 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5448 inferior with a 64-bit GDBSERVER should look the same as debugging it
5449 with a 32-bit GDBSERVER, we need to convert it. */
5450 siginfo_fixup (&siginfo, inf_siginfo, 0);
5451
5452 if (offset + len > sizeof (siginfo))
5453 len = sizeof (siginfo) - offset;
5454
5455 if (readbuf != NULL)
5456 memcpy (readbuf, inf_siginfo + offset, len);
5457 else
5458 {
5459 memcpy (inf_siginfo + offset, writebuf, len);
5460
5461 /* Convert back to ptrace layout before flushing it out. */
5462 siginfo_fixup (&siginfo, inf_siginfo, 1);
5463
5464 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5465 return -1;
5466 }
5467
5468 return len;
5469 }
5470
5471 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
5472 it lets us notice when children change state, and it acts as the
5473 handler for the sigsuspend in my_waitpid. */
5474
5475 static void
5476 sigchld_handler (int signo)
5477 {
5478 int old_errno = errno;
5479
5480 if (debug_threads)
5481 {
5482 do
5483 {
5484 /* fprintf is not async-signal-safe, so call write
5485 directly. */
5486 if (write (2, "sigchld_handler\n",
5487 sizeof ("sigchld_handler\n") - 1) < 0)
5488 break; /* just ignore */
5489 } while (0);
5490 }
5491
5492 if (target_is_async_p ())
5493 async_file_mark (); /* trigger a linux_wait */
5494
5495 errno = old_errno;
5496 }
5497
5498 static int
5499 linux_supports_non_stop (void)
5500 {
5501 return 1;
5502 }
5503
5504 static int
5505 linux_async (int enable)
5506 {
5507 int previous = target_is_async_p ();
5508
5509 if (debug_threads)
5510 debug_printf ("linux_async (%d), previous=%d\n",
5511 enable, previous);
5512
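/* The event pipe implements the usual self-pipe trick: the
   SIGCHLD handler marks it via async_file_mark, and the event
   loop watches the read end through add_file_handler below.  */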
5513 if (previous != enable)
5514 {
5515 sigset_t mask;
5516 sigemptyset (&mask);
5517 sigaddset (&mask, SIGCHLD);
5518
5519 sigprocmask (SIG_BLOCK, &mask, NULL);
5520
5521 if (enable)
5522 {
5523 if (pipe (linux_event_pipe) == -1)
5524 {
5525 linux_event_pipe[0] = -1;
5526 linux_event_pipe[1] = -1;
5527 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5528
5529 warning ("creating event pipe failed.");
5530 return previous;
5531 }
5532
5533 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5534 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5535
5536 /* Register the event loop handler. */
5537 add_file_handler (linux_event_pipe[0],
5538 handle_target_event, NULL);
5539
5540 /* Always trigger a linux_wait. */
5541 async_file_mark ();
5542 }
5543 else
5544 {
5545 delete_file_handler (linux_event_pipe[0]);
5546
5547 close (linux_event_pipe[0]);
5548 close (linux_event_pipe[1]);
5549 linux_event_pipe[0] = -1;
5550 linux_event_pipe[1] = -1;
5551 }
5552
5553 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5554 }
5555
5556 return previous;
5557 }
5558
5559 static int
5560 linux_start_non_stop (int nonstop)
5561 {
5562 /* Register or unregister from event-loop accordingly. */
5563 linux_async (nonstop);
5564
5565 if (target_is_async_p () != (nonstop != 0))
5566 return -1;
5567
5568 return 0;
5569 }
5570
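/* Multi-process debugging is always supported on GNU/Linux.  */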
5571 static int
5572 linux_supports_multi_process (void)
5573 {
5574 return 1;
5575 }
5576
5577 /* Check if fork events are supported. */
5578
5579 static int
5580 linux_supports_fork_events (void)
5581 {
5582 return linux_supports_tracefork ();
5583 }
5584
5585 /* Check if vfork events are supported. */
5586
5587 static int
5588 linux_supports_vfork_events (void)
5589 {
5590 return linux_supports_tracefork ();
5591 }
5592
5593 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5594 options for the specified lwp. */
5595
5596 static int
5597 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5598 void *args)
5599 {
5600 struct thread_info *thread = (struct thread_info *) entry;
5601 struct lwp_info *lwp = get_thread_lwp (thread);
5602
5603 if (!lwp->stopped)
5604 {
5605 /* Stop the lwp so we can modify its ptrace options. */
5606 lwp->must_set_ptrace_flags = 1;
5607 linux_stop_lwp (lwp);
5608 }
5609 else
5610 {
5611 /* Already stopped; go ahead and set the ptrace options. */
5612 struct process_info *proc = find_process_pid (pid_of (thread));
5613 int options = linux_low_ptrace_options (proc->attached);
5614
5615 linux_enable_event_reporting (lwpid_of (thread), options);
5616 lwp->must_set_ptrace_flags = 0;
5617 }
5618
5619 return 0;
5620 }
5621
5622 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5623 ptrace flags for all inferiors. This is in case the new GDB connection
5624 doesn't support the same set of events that the previous one did. */
5625
5626 static void
5627 linux_handle_new_gdb_connection (void)
5628 {
5629 pid_t pid;
5630
5631 /* Request that all the lwps reset their ptrace options. */
5632   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5633 }
5634
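/* Disabling address space randomization is supported whenever
   personality support is available.  */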
5635 static int
5636 linux_supports_disable_randomization (void)
5637 {
5638 #ifdef HAVE_PERSONALITY
5639 return 1;
5640 #else
5641 return 0;
5642 #endif
5643 }
5644
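/* The in-process agent is always supported on GNU/Linux.  */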
5645 static int
5646 linux_supports_agent (void)
5647 {
5648 return 1;
5649 }
5650
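/* Check whether range stepping is supported, deferring to the
   low target.  */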
5651 static int
5652 linux_supports_range_stepping (void)
5653 {
5654 if (*the_low_target.supports_range_stepping == NULL)
5655 return 0;
5656
5657 return (*the_low_target.supports_range_stepping) ();
5658 }
5659
5660 /* Enumerate spufs IDs for process PID. */
5661 static int
5662 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5663 {
5664 int pos = 0;
5665 int written = 0;
5666 char path[128];
5667 DIR *dir;
5668 struct dirent *entry;
5669
5670 sprintf (path, "/proc/%ld/fd", pid);
5671 dir = opendir (path);
5672 if (!dir)
5673 return -1;
5674
5675 rewinddir (dir);
5676 while ((entry = readdir (dir)) != NULL)
5677 {
5678 struct stat st;
5679 struct statfs stfs;
5680 int fd;
5681
5682 fd = atoi (entry->d_name);
5683 if (!fd)
5684 continue;
5685
5686 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5687 if (stat (path, &st) != 0)
5688 continue;
5689 if (!S_ISDIR (st.st_mode))
5690 continue;
5691
5692 if (statfs (path, &stfs) != 0)
5693 continue;
5694 if (stfs.f_type != SPUFS_MAGIC)
5695 continue;
5696
5697 if (pos >= offset && pos + 4 <= offset + len)
5698 {
5699 *(unsigned int *)(buf + pos - offset) = fd;
5700 written += 4;
5701 }
5702 pos += 4;
5703 }
5704
5705 closedir (dir);
5706 return written;
5707 }
5708
5709 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5710 object type, using the /proc file system. */
5711 static int
5712 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5713 unsigned const char *writebuf,
5714 CORE_ADDR offset, int len)
5715 {
5716 long pid = lwpid_of (current_thread);
5717 char buf[128];
5718 int fd = 0;
5719 int ret = 0;
5720
5721 if (!writebuf && !readbuf)
5722 return -1;
5723
5724 if (!*annex)
5725 {
5726 if (!readbuf)
5727 return -1;
5728 else
5729 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5730 }
5731
5732 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5733   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5734 if (fd <= 0)
5735 return -1;
5736
5737 if (offset != 0
5738 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5739 {
5740 close (fd);
5741 return 0;
5742 }
5743
5744 if (writebuf)
5745 ret = write (fd, writebuf, (size_t) len);
5746 else
5747 ret = read (fd, readbuf, (size_t) len);
5748
5749 close (fd);
5750 return ret;
5751 }
5752
5753 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5754 struct target_loadseg
5755 {
5756 /* Core address to which the segment is mapped. */
5757 Elf32_Addr addr;
5758 /* VMA recorded in the program header. */
5759 Elf32_Addr p_vaddr;
5760 /* Size of this segment in memory. */
5761 Elf32_Word p_memsz;
5762 };
5763
5764 # if defined PT_GETDSBT
5765 struct target_loadmap
5766 {
5767 /* Protocol version number, must be zero. */
5768 Elf32_Word version;
5769 /* Pointer to the DSBT table, its size, and the DSBT index. */
5770 unsigned *dsbt_table;
5771 unsigned dsbt_size, dsbt_index;
5772 /* Number of segments in this map. */
5773 Elf32_Word nsegs;
5774 /* The actual memory map. */
5775 struct target_loadseg segs[/*nsegs*/];
5776 };
5777 # define LINUX_LOADMAP PT_GETDSBT
5778 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5779 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5780 # else
5781 struct target_loadmap
5782 {
5783 /* Protocol version number, must be zero. */
5784 Elf32_Half version;
5785 /* Number of segments in this map. */
5786 Elf32_Half nsegs;
5787 /* The actual memory map. */
5788 struct target_loadseg segs[/*nsegs*/];
5789 };
5790 # define LINUX_LOADMAP PTRACE_GETFDPIC
5791 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5792 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5793 # endif
5794
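/* Fetch the load map of either the executable or the interpreter,
   depending on ANNEX, using the LINUX_LOADMAP ptrace request, and copy
   up to LEN bytes of it starting at OFFSET into MYADDR.  */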
5795 static int
5796 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5797 unsigned char *myaddr, unsigned int len)
5798 {
5799 int pid = lwpid_of (current_thread);
5800 int addr = -1;
5801 struct target_loadmap *data = NULL;
5802 unsigned int actual_length, copy_length;
5803
5804 if (strcmp (annex, "exec") == 0)
5805 addr = (int) LINUX_LOADMAP_EXEC;
5806 else if (strcmp (annex, "interp") == 0)
5807 addr = (int) LINUX_LOADMAP_INTERP;
5808 else
5809 return -1;
5810
5811 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5812 return -1;
5813
5814 if (data == NULL)
5815 return -1;
5816
5817 actual_length = sizeof (struct target_loadmap)
5818 + sizeof (struct target_loadseg) * data->nsegs;
5819
5820 if (offset < 0 || offset > actual_length)
5821 return -1;
5822
5823 copy_length = actual_length - offset < len ? actual_length - offset : len;
5824 memcpy (myaddr, (char *) data + offset, copy_length);
5825 return copy_length;
5826 }
5827 #else
5828 # define linux_read_loadmap NULL
5829 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5830
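/* Handle a qSupported query, deferring to the low target if it has
   anything to add.  */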
5831 static void
5832 linux_process_qsupported (const char *query)
5833 {
5834 if (the_low_target.process_qsupported != NULL)
5835 the_low_target.process_qsupported (query);
5836 }
5837
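/* Check whether tracepoints are supported, deferring to the
   low target.  */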
5838 static int
5839 linux_supports_tracepoints (void)
5840 {
5841 if (*the_low_target.supports_tracepoints == NULL)
5842 return 0;
5843
5844 return (*the_low_target.supports_tracepoints) ();
5845 }
5846
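/* Read the current program counter from REGCACHE, if the low target
   provides a way to do so.  */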
5847 static CORE_ADDR
5848 linux_read_pc (struct regcache *regcache)
5849 {
5850 if (the_low_target.get_pc == NULL)
5851 return 0;
5852
5853 return (*the_low_target.get_pc) (regcache);
5854 }
5855
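/* Write PC into REGCACHE.  The low target must provide set_pc.  */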
5856 static void
5857 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5858 {
5859 gdb_assert (the_low_target.set_pc != NULL);
5860
5861 (*the_low_target.set_pc) (regcache, pc);
5862 }
5863
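/* Report whether THREAD's LWP is currently stopped.  */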
5864 static int
5865 linux_thread_stopped (struct thread_info *thread)
5866 {
5867 return get_thread_lwp (thread)->stopped;
5868 }
5869
5870 /* This exposes stop-all-threads functionality to other modules. */
5871
5872 static void
5873 linux_pause_all (int freeze)
5874 {
5875 stop_all_lwps (freeze, NULL);
5876 }
5877
5878 /* This exposes unstop-all-threads functionality to other gdbserver
5879 modules. */
5880
5881 static void
5882 linux_unpause_all (int unfreeze)
5883 {
5884 unstop_all_lwps (unfreeze, NULL);
5885 }
5886
5887 static int
5888 linux_prepare_to_access_memory (void)
5889 {
5890 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5891 running LWP. */
5892 if (non_stop)
5893 linux_pause_all (1);
5894 return 0;
5895 }
5896
5897 static void
5898 linux_done_accessing_memory (void)
5899 {
5900 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5901 running LWP. */
5902 if (non_stop)
5903 linux_unpause_all (1);
5904 }
5905
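/* Install a fast tracepoint jump pad, deferring to the low target.  */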
5906 static int
5907 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5908 CORE_ADDR collector,
5909 CORE_ADDR lockaddr,
5910 ULONGEST orig_size,
5911 CORE_ADDR *jump_entry,
5912 CORE_ADDR *trampoline,
5913 ULONGEST *trampoline_size,
5914 unsigned char *jjump_pad_insn,
5915 ULONGEST *jjump_pad_insn_size,
5916 CORE_ADDR *adjusted_insn_addr,
5917 CORE_ADDR *adjusted_insn_addr_end,
5918 char *err)
5919 {
5920 return (*the_low_target.install_fast_tracepoint_jump_pad)
5921 (tpoint, tpaddr, collector, lockaddr, orig_size,
5922 jump_entry, trampoline, trampoline_size,
5923 jjump_pad_insn, jjump_pad_insn_size,
5924 adjusted_insn_addr, adjusted_insn_addr_end,
5925 err);
5926 }
5927
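/* Return the bytecode compilation ops of the low target, if any.  */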
5928 static struct emit_ops *
5929 linux_emit_ops (void)
5930 {
5931 if (the_low_target.emit_ops != NULL)
5932 return (*the_low_target.emit_ops) ();
5933 else
5934 return NULL;
5935 }
5936
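/* Return the minimum length of an instruction that can be safely
   overwritten for a fast tracepoint, as reported by the low target.  */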
5937 static int
5938 linux_get_min_fast_tracepoint_insn_len (void)
5939 {
5940 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5941 }
5942
5943 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5944
5945 static int
5946 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5947 CORE_ADDR *phdr_memaddr, int *num_phdr)
5948 {
5949 char filename[PATH_MAX];
5950 int fd;
5951 const int auxv_size = is_elf64
5952 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5953 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5954
5955 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5956
5957 fd = open (filename, O_RDONLY);
5958 if (fd < 0)
5959 return 1;
5960
5961 *phdr_memaddr = 0;
5962 *num_phdr = 0;
5963 while (read (fd, buf, auxv_size) == auxv_size
5964 && (*phdr_memaddr == 0 || *num_phdr == 0))
5965 {
5966 if (is_elf64)
5967 {
5968 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5969
5970 switch (aux->a_type)
5971 {
5972 case AT_PHDR:
5973 *phdr_memaddr = aux->a_un.a_val;
5974 break;
5975 case AT_PHNUM:
5976 *num_phdr = aux->a_un.a_val;
5977 break;
5978 }
5979 }
5980 else
5981 {
5982 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5983
5984 switch (aux->a_type)
5985 {
5986 case AT_PHDR:
5987 *phdr_memaddr = aux->a_un.a_val;
5988 break;
5989 case AT_PHNUM:
5990 *num_phdr = aux->a_un.a_val;
5991 break;
5992 }
5993 }
5994 }
5995
5996 close (fd);
5997
5998 if (*phdr_memaddr == 0 || *num_phdr == 0)
5999 {
6000 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6001 "phdr_memaddr = %ld, phdr_num = %d",
6002 (long) *phdr_memaddr, *num_phdr);
6003 return 2;
6004 }
6005
6006 return 0;
6007 }
6008
6009 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6010
6011 static CORE_ADDR
6012 get_dynamic (const int pid, const int is_elf64)
6013 {
6014 CORE_ADDR phdr_memaddr, relocation;
6015 int num_phdr, i;
6016 unsigned char *phdr_buf;
6017 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6018
6019 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6020 return 0;
6021
6022 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6023 phdr_buf = alloca (num_phdr * phdr_size);
6024
6025 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6026 return 0;
6027
6028 /* Compute relocation: it is expected to be 0 for "regular" executables,
6029 non-zero for PIE ones. */
6030 relocation = -1;
6031 for (i = 0; relocation == -1 && i < num_phdr; i++)
6032 if (is_elf64)
6033 {
6034 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6035
6036 if (p->p_type == PT_PHDR)
6037 relocation = phdr_memaddr - p->p_vaddr;
6038 }
6039 else
6040 {
6041 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6042
6043 if (p->p_type == PT_PHDR)
6044 relocation = phdr_memaddr - p->p_vaddr;
6045 }
6046
6047 if (relocation == -1)
6048 {
6049       /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6050 	 real-world executables, including PIE executables, always have
6051 	 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6052 	 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6053 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
6054
6055 	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6056
6057 	 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6058
6059 return 0;
6060 }
6061
6062 for (i = 0; i < num_phdr; i++)
6063 {
6064 if (is_elf64)
6065 {
6066 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6067
6068 if (p->p_type == PT_DYNAMIC)
6069 return p->p_vaddr + relocation;
6070 }
6071 else
6072 {
6073 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6074
6075 if (p->p_type == PT_DYNAMIC)
6076 return p->p_vaddr + relocation;
6077 }
6078 }
6079
6080 return 0;
6081 }
6082
6083 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6084 can be 0 if the inferior does not yet have the library list initialized.
6085 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6086 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6087
6088 static CORE_ADDR
6089 get_r_debug (const int pid, const int is_elf64)
6090 {
6091 CORE_ADDR dynamic_memaddr;
6092 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6093 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6094 CORE_ADDR map = -1;
6095
6096 dynamic_memaddr = get_dynamic (pid, is_elf64);
6097 if (dynamic_memaddr == 0)
6098 return map;
6099
6100 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6101 {
6102 if (is_elf64)
6103 {
6104 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6105 #ifdef DT_MIPS_RLD_MAP
6106 union
6107 {
6108 Elf64_Xword map;
6109 unsigned char buf[sizeof (Elf64_Xword)];
6110 }
6111 rld_map;
6112
6113 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6114 {
6115 if (linux_read_memory (dyn->d_un.d_val,
6116 rld_map.buf, sizeof (rld_map.buf)) == 0)
6117 return rld_map.map;
6118 else
6119 break;
6120 }
6121 #endif /* DT_MIPS_RLD_MAP */
6122
6123 if (dyn->d_tag == DT_DEBUG && map == -1)
6124 map = dyn->d_un.d_val;
6125
6126 if (dyn->d_tag == DT_NULL)
6127 break;
6128 }
6129 else
6130 {
6131 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6132 #ifdef DT_MIPS_RLD_MAP
6133 union
6134 {
6135 Elf32_Word map;
6136 unsigned char buf[sizeof (Elf32_Word)];
6137 }
6138 rld_map;
6139
6140 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6141 {
6142 if (linux_read_memory (dyn->d_un.d_val,
6143 rld_map.buf, sizeof (rld_map.buf)) == 0)
6144 return rld_map.map;
6145 else
6146 break;
6147 }
6148 #endif /* DT_MIPS_RLD_MAP */
6149
6150 if (dyn->d_tag == DT_DEBUG && map == -1)
6151 map = dyn->d_un.d_val;
6152
6153 if (dyn->d_tag == DT_NULL)
6154 break;
6155 }
6156
6157 dynamic_memaddr += dyn_size;
6158 }
6159
6160 return map;
6161 }
6162
6163 /* Read one pointer from MEMADDR in the inferior. */
6164
6165 static int
6166 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6167 {
6168 int ret;
6169
6170   /* Go through a union so this works on either big or little endian
6171      hosts, when the inferior's pointer size is smaller than the size
6172      of CORE_ADDR.  It is assumed the inferior's endianness is the
6173      same as the superior's.  */
6174 union
6175 {
6176 CORE_ADDR core_addr;
6177 unsigned int ui;
6178 unsigned char uc;
6179 } addr;
6180
6181 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6182 if (ret == 0)
6183 {
6184 if (ptr_size == sizeof (CORE_ADDR))
6185 *ptr = addr.core_addr;
6186 else if (ptr_size == sizeof (unsigned int))
6187 *ptr = addr.ui;
6188 else
6189 gdb_assert_not_reached ("unhandled pointer size");
6190 }
6191 return ret;
6192 }
6193
6194 struct link_map_offsets
6195 {
6196 /* Offset and size of r_debug.r_version. */
6197 int r_version_offset;
6198
6199 /* Offset and size of r_debug.r_map. */
6200 int r_map_offset;
6201
6202 /* Offset to l_addr field in struct link_map. */
6203 int l_addr_offset;
6204
6205 /* Offset to l_name field in struct link_map. */
6206 int l_name_offset;
6207
6208 /* Offset to l_ld field in struct link_map. */
6209 int l_ld_offset;
6210
6211 /* Offset to l_next field in struct link_map. */
6212 int l_next_offset;
6213
6214 /* Offset to l_prev field in struct link_map. */
6215 int l_prev_offset;
6216 };
6217
6218 /* Construct qXfer:libraries-svr4:read reply. */
6219
6220 static int
6221 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6222 unsigned const char *writebuf,
6223 CORE_ADDR offset, int len)
6224 {
6225 char *document;
6226 unsigned document_len;
6227 struct process_info_private *const priv = current_process ()->priv;
6228 char filename[PATH_MAX];
6229 int pid, is_elf64;
6230
6231 static const struct link_map_offsets lmo_32bit_offsets =
6232 {
6233 0, /* r_version offset. */
6234 4, /* r_debug.r_map offset. */
6235 0, /* l_addr offset in link_map. */
6236 4, /* l_name offset in link_map. */
6237 8, /* l_ld offset in link_map. */
6238 12, /* l_next offset in link_map. */
6239 16 /* l_prev offset in link_map. */
6240 };
6241
6242 static const struct link_map_offsets lmo_64bit_offsets =
6243 {
6244 0, /* r_version offset. */
6245 8, /* r_debug.r_map offset. */
6246 0, /* l_addr offset in link_map. */
6247 8, /* l_name offset in link_map. */
6248 16, /* l_ld offset in link_map. */
6249 24, /* l_next offset in link_map. */
6250 32 /* l_prev offset in link_map. */
6251 };
6252 const struct link_map_offsets *lmo;
6253 unsigned int machine;
6254 int ptr_size;
6255 CORE_ADDR lm_addr = 0, lm_prev = 0;
6256 int allocated = 1024;
6257 char *p;
6258 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6259 int header_done = 0;
6260
6261 if (writebuf != NULL)
6262 return -2;
6263 if (readbuf == NULL)
6264 return -1;
6265
6266 pid = lwpid_of (current_thread);
6267 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6268 is_elf64 = elf_64_file_p (filename, &machine);
6269 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6270 ptr_size = is_elf64 ? 8 : 4;
6271
6272 while (annex[0] != '\0')
6273 {
6274 const char *sep;
6275 CORE_ADDR *addrp;
6276 int len;
6277
6278 sep = strchr (annex, '=');
6279 if (sep == NULL)
6280 break;
6281
6282 len = sep - annex;
6283 if (len == 5 && startswith (annex, "start"))
6284 addrp = &lm_addr;
6285 else if (len == 4 && startswith (annex, "prev"))
6286 addrp = &lm_prev;
6287 else
6288 {
6289 annex = strchr (sep, ';');
6290 if (annex == NULL)
6291 break;
6292 annex++;
6293 continue;
6294 }
6295
6296 annex = decode_address_to_semicolon (addrp, sep + 1);
6297 }
6298
6299 if (lm_addr == 0)
6300 {
6301 int r_version = 0;
6302
6303 if (priv->r_debug == 0)
6304 priv->r_debug = get_r_debug (pid, is_elf64);
6305
6306       /* We failed to find DT_DEBUG.  This situation will not change
6307 	 for this inferior - do not retry it.  Report it to GDB as
6308 	 E01; see solib-svr4.c on the GDB side for the reasons.  */
6309 if (priv->r_debug == (CORE_ADDR) -1)
6310 return -1;
6311
6312 if (priv->r_debug != 0)
6313 {
6314 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6315 (unsigned char *) &r_version,
6316 sizeof (r_version)) != 0
6317 || r_version != 1)
6318 {
6319 warning ("unexpected r_debug version %d", r_version);
6320 }
6321 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6322 &lm_addr, ptr_size) != 0)
6323 {
6324 warning ("unable to read r_map from 0x%lx",
6325 (long) priv->r_debug + lmo->r_map_offset);
6326 }
6327 }
6328 }
6329
6330 document = xmalloc (allocated);
6331 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6332 p = document + strlen (document);
6333
6334 while (lm_addr
6335 && read_one_ptr (lm_addr + lmo->l_name_offset,
6336 &l_name, ptr_size) == 0
6337 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6338 &l_addr, ptr_size) == 0
6339 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6340 &l_ld, ptr_size) == 0
6341 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6342 &l_prev, ptr_size) == 0
6343 && read_one_ptr (lm_addr + lmo->l_next_offset,
6344 &l_next, ptr_size) == 0)
6345 {
6346 unsigned char libname[PATH_MAX];
6347
6348 if (lm_prev != l_prev)
6349 {
6350 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6351 (long) lm_prev, (long) l_prev);
6352 break;
6353 }
6354
6355       /* Ignore the first entry even if it has a valid name, as the first
6356 	 entry corresponds to the main executable.  The first entry should
6357 	 not be skipped if the dynamic loader was loaded late by a static
6358 	 executable (see the solib-svr4.c parameter ignore_first).  But in
6359 	 that case the main executable has no PT_DYNAMIC present, and this
6360 	 function has already exited above due to a failed get_r_debug.  */
6361 if (lm_prev == 0)
6362 {
6363 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6364 p = p + strlen (p);
6365 }
6366 else
6367 {
6368 /* Not checking for error because reading may stop before
6369 we've got PATH_MAX worth of characters. */
6370 libname[0] = '\0';
6371 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6372 libname[sizeof (libname) - 1] = '\0';
6373 if (libname[0] != '\0')
6374 {
6375 /* 6x the size for xml_escape_text below. */
6376 size_t len = 6 * strlen ((char *) libname);
6377 char *name;
6378
6379 if (!header_done)
6380 {
6381 /* Terminate `<library-list-svr4'. */
6382 *p++ = '>';
6383 header_done = 1;
6384 }
6385
6386 while (allocated < p - document + len + 200)
6387 {
6388 /* Expand to guarantee sufficient storage. */
6389 uintptr_t document_len = p - document;
6390
6391 document = xrealloc (document, 2 * allocated);
6392 allocated *= 2;
6393 p = document + document_len;
6394 }
6395
6396 name = xml_escape_text ((char *) libname);
6397 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6398 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6399 name, (unsigned long) lm_addr,
6400 (unsigned long) l_addr, (unsigned long) l_ld);
6401 free (name);
6402 }
6403 }
6404
6405 lm_prev = lm_addr;
6406 lm_addr = l_next;
6407 }
6408
6409 if (!header_done)
6410 {
6411 /* Empty list; terminate `<library-list-svr4'. */
6412 strcpy (p, "/>");
6413 }
6414 else
6415 strcpy (p, "</library-list-svr4>");
6416
6417 document_len = strlen (document);
6418 if (offset < document_len)
6419 document_len -= offset;
6420 else
6421 document_len = 0;
6422 if (len > document_len)
6423 len = document_len;
6424
6425 memcpy (readbuf, document + offset, len);
6426 xfree (document);
6427
6428 return len;
6429 }
6430
6431 #ifdef HAVE_LINUX_BTRACE
6432
6433 /* See to_enable_btrace target method. */
6434
6435 static struct btrace_target_info *
6436 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6437 {
6438 struct btrace_target_info *tinfo;
6439
6440 tinfo = linux_enable_btrace (ptid, conf);
6441
6442 if (tinfo != NULL && tinfo->ptr_bits == 0)
6443 {
6444 struct thread_info *thread = find_thread_ptid (ptid);
6445 struct regcache *regcache = get_thread_regcache (thread, 0);
6446
6447 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6448 }
6449
6450 return tinfo;
6451 }
6452
6453 /* See to_disable_btrace target method. */
6454
6455 static int
6456 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6457 {
6458 enum btrace_error err;
6459
6460 err = linux_disable_btrace (tinfo);
6461 return (err == BTRACE_ERR_NONE ? 0 : -1);
6462 }
6463
6464 /* See to_read_btrace target method. */
6465
6466 static int
6467 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6468 int type)
6469 {
6470 struct btrace_data btrace;
6471 struct btrace_block *block;
6472 enum btrace_error err;
6473 int i;
6474
6475 btrace_data_init (&btrace);
6476
6477 err = linux_read_btrace (&btrace, tinfo, type);
6478 if (err != BTRACE_ERR_NONE)
6479 {
6480 if (err == BTRACE_ERR_OVERFLOW)
6481 buffer_grow_str0 (buffer, "E.Overflow.");
6482 else
6483 buffer_grow_str0 (buffer, "E.Generic Error.");
6484
6485 btrace_data_fini (&btrace);
6486 return -1;
6487 }
6488
6489 switch (btrace.format)
6490 {
6491 case BTRACE_FORMAT_NONE:
6492 buffer_grow_str0 (buffer, "E.No Trace.");
6493 break;
6494
6495 case BTRACE_FORMAT_BTS:
6496 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6497 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6498
6499 for (i = 0;
6500 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6501 i++)
6502 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6503 paddress (block->begin), paddress (block->end));
6504
6505 buffer_grow_str0 (buffer, "</btrace>\n");
6506 break;
6507
6508 default:
6509 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6510
6511 btrace_data_fini (&btrace);
6512 return -1;
6513 }
6514
6515 btrace_data_fini (&btrace);
6516 return 0;
6517 }
6518
6519 /* See to_btrace_conf target method. */
6520
6521 static int
6522 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6523 struct buffer *buffer)
6524 {
6525 const struct btrace_config *conf;
6526
6527 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6528 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6529
6530 conf = linux_btrace_conf (tinfo);
6531 if (conf != NULL)
6532 {
6533 switch (conf->format)
6534 {
6535 case BTRACE_FORMAT_NONE:
6536 break;
6537
6538 case BTRACE_FORMAT_BTS:
6539 buffer_xml_printf (buffer, "<bts");
6540 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6541 buffer_xml_printf (buffer, " />\n");
6542 break;
6543 }
6544 }
6545
6546 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6547 return 0;
6548 }
6549 #endif /* HAVE_LINUX_BTRACE */
6550
6551 /* See nat/linux-nat.h. */
6552
6553 ptid_t
6554 current_lwp_ptid (void)
6555 {
6556 return ptid_of (current_thread);
6557 }
6558
6559 static struct target_ops linux_target_ops = {
6560 linux_create_inferior,
6561 linux_attach,
6562 linux_kill,
6563 linux_detach,
6564 linux_mourn,
6565 linux_join,
6566 linux_thread_alive,
6567 linux_resume,
6568 linux_wait,
6569 linux_fetch_registers,
6570 linux_store_registers,
6571 linux_prepare_to_access_memory,
6572 linux_done_accessing_memory,
6573 linux_read_memory,
6574 linux_write_memory,
6575 linux_look_up_symbols,
6576 linux_request_interrupt,
6577 linux_read_auxv,
6578 linux_supports_z_point_type,
6579 linux_insert_point,
6580 linux_remove_point,
6581 linux_stopped_by_sw_breakpoint,
6582 linux_supports_stopped_by_sw_breakpoint,
6583 linux_stopped_by_hw_breakpoint,
6584 linux_supports_stopped_by_hw_breakpoint,
6585 linux_supports_conditional_breakpoints,
6586 linux_stopped_by_watchpoint,
6587 linux_stopped_data_address,
6588 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6589 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6590 && defined(PT_TEXT_END_ADDR)
6591 linux_read_offsets,
6592 #else
6593 NULL,
6594 #endif
6595 #ifdef USE_THREAD_DB
6596 thread_db_get_tls_address,
6597 #else
6598 NULL,
6599 #endif
6600 linux_qxfer_spu,
6601 hostio_last_error_from_errno,
6602 linux_qxfer_osdata,
6603 linux_xfer_siginfo,
6604 linux_supports_non_stop,
6605 linux_async,
6606 linux_start_non_stop,
6607 linux_supports_multi_process,
6608 linux_supports_fork_events,
6609 linux_supports_vfork_events,
6610 linux_handle_new_gdb_connection,
6611 #ifdef USE_THREAD_DB
6612 thread_db_handle_monitor_command,
6613 #else
6614 NULL,
6615 #endif
6616 linux_common_core_of_thread,
6617 linux_read_loadmap,
6618 linux_process_qsupported,
6619 linux_supports_tracepoints,
6620 linux_read_pc,
6621 linux_write_pc,
6622 linux_thread_stopped,
6623 NULL,
6624 linux_pause_all,
6625 linux_unpause_all,
6626 linux_stabilize_threads,
6627 linux_install_fast_tracepoint_jump_pad,
6628 linux_emit_ops,
6629 linux_supports_disable_randomization,
6630 linux_get_min_fast_tracepoint_insn_len,
6631 linux_qxfer_libraries_svr4,
6632 linux_supports_agent,
6633 #ifdef HAVE_LINUX_BTRACE
6634 linux_supports_btrace,
6635 linux_low_enable_btrace,
6636 linux_low_disable_btrace,
6637 linux_low_read_btrace,
6638 linux_low_btrace_conf,
6639 #else
6640 NULL,
6641 NULL,
6642 NULL,
6643 NULL,
6644 NULL,
6645 #endif
6646 linux_supports_range_stepping,
6647 linux_proc_pid_to_exec_file,
6648 linux_mntns_open_cloexec,
6649 linux_mntns_unlink,
6650 linux_mntns_readlink,
6651 };
6652
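/* Set up the process-wide signal dispositions gdbserver relies on.  */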
6653 static void
5654 linux_init_signals (void)
6655 {
6656 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6657 to find what the cancel signal actually is. */
6658 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6659 signal (__SIGRTMIN+1, SIG_IGN);
6660 #endif
6661 }
6662
6663 #ifdef HAVE_LINUX_REGSETS
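/* Count the regsets in INFO; the list is terminated by an entry with a
   negative size.  */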
6664 void
6665 initialize_regsets_info (struct regsets_info *info)
6666 {
6667 for (info->num_regsets = 0;
6668 info->regsets[info->num_regsets].size >= 0;
6669 info->num_regsets++)
6670 ;
6671 }
6672 #endif
6673
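/* One-time initialization of the Linux low target: register the target
   vector and breakpoint data, set up signal handling, initialize the
   architecture-specific bits, and probe for ptrace features.  */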
6674 void
6675 initialize_low (void)
6676 {
6677 struct sigaction sigchld_action;
6678 memset (&sigchld_action, 0, sizeof (sigchld_action));
6679 set_target_ops (&linux_target_ops);
6680 set_breakpoint_data (the_low_target.breakpoint,
6681 the_low_target.breakpoint_len);
6682 linux_init_signals ();
6683 linux_ptrace_init_warnings ();
6684
6685 sigchld_action.sa_handler = sigchld_handler;
6686 sigemptyset (&sigchld_action.sa_mask);
6687 sigchld_action.sa_flags = SA_RESTART;
6688 sigaction (SIGCHLD, &sigchld_action, NULL);
6689
6690 initialize_low_arch ();
6691
6692 linux_check_ptrace_features ();
6693 }