/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
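
/* Illustrative sketch: how W_STOPCODE round-trips with the standard
   WIFSTOPPED/WSTOPSIG macros.  gdbserver uses W_STOPCODE further
   below to synthesize a wait status for a deferred signal.  Example
   only, not built:  */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <assert.h>

static void
w_stopcode_example (void)
{
  /* A stop status packs the signal number in the second byte and
     0x7f in the low byte, e.g. W_STOPCODE (SIGTRAP)
     == (5 << 8) | 0x7f == 0x57f.  */
  int wstat = W_STOPCODE (SIGTRAP);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGTRAP);
}
#endif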

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
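
/* Illustrative sketch: these layouts match the records in
   /proc/PID/auxv, so the vector can be read as an array of
   fixed-size entries.  A minimal 64-bit reader, error handling
   elided; example only, not built:  */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static void
dump_auxv_example (int pid)
{
  char path[64];
  Elf64_auxv_t entry;
  int fd;

  snprintf (path, sizeof (path), "/proc/%d/auxv", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return;

  /* AT_NULL (0) terminates the vector.  */
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry)
	 && entry.a_type != AT_NULL)
    printf ("type %llu, value 0x%llx\n",
	    (unsigned long long) entry.a_type,
	    (unsigned long long) entry.a_un.a_val);
  close (fd);
}
#endif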

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
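
/* Illustrative sketch: the pointer-to-pointer walk in
   pull_pid_from_list unlinks a node without special-casing the list
   head.  A typical round trip, using a hypothetical pid; example
   only, not built:  */
#if 0
static void
pid_list_example (void)
{
  struct simple_pid_list *list = NULL;
  int status;

  add_to_pid_list (&list, 1234, W_STOPCODE (SIGSTOP));

  if (pull_pid_from_list (&list, 1234, &status))
    {
      /* Here status == W_STOPCODE (SIGSTOP); the node has been
	 freed and LIST is NULL again.  */
    }
}
#endif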

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if HEADER is not an ELF file at all.  In all cases,
   store the machine type in *MACHINE (EM_NONE for non-ELF files).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
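
/* Illustrative sketch: probing the bitness of a live process via the
   function above, assuming a mounted /proc; example only, not
   built:  */
#if 0
static void
elf_64_example (int pid)
{
  unsigned int machine;
  int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  /* IS_64 is 1 for a 64-bit executable (e.g. MACHINE == EM_X86_64),
     0 for a 32-bit one, and -1 if /proc/PID/exe was unreadable or
     not ELF.  */
}
#endif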

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}
static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 0 so as
   not to report the trap to higher layers).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
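
/* Illustrative sketch: how an extended event hides inside a wait
   status.  linux_ptrace_get_extended_event (nat/linux-ptrace.h)
   amounts to the shift shown here; example only, not built:  */
#if 0
static void
extended_event_example (int wstat)
{
  /* For a PTRACE_EVENT_CLONE stop, the kernel reports a SIGTRAP stop
     with the event number in the third byte of the status, so:  */
  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) == SIGTRAP
      && (wstat >> 16) == PTRACE_EVENT_CLONE)
    {
      /* This is a clone event; PTRACE_GETEVENTMSG then yields the
	 new LWP's id, as in handle_extended_wait above.  */
    }
}
#endif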

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
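
/* Worked example for the PC adjustment above, assuming an x86-like
   target where decr_pc_after_break is 1 and the one-byte breakpoint
   instruction (0xcc) is planted at 0x400500:

     - the inferior traps and the kernel reports PC == 0x400501;
     - sw_breakpoint_pc = 0x400501 - 1 = 0x400500, where the
       breakpoint lookup succeeds;
     - the PC is rewound to 0x400500, so GDB reports the stop at the
       breakpoint address and resuming re-executes the original
       instruction.  */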

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}
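
/* Illustrative sketch: the /proc scan done by
   linux_proc_attach_tgid_threads essentially lists /proc/PID/task
   and hands each tid to the callback, rescanning until no new LWPs
   turn up.  Simplified single pass; example only, not built:  */
#if 0
#include <stdio.h>
#include <dirent.h>
#include <stdlib.h>

static void
list_tasks_example (int pid)
{
  char path[64];
  DIR *dir;
  struct dirent *de;

  snprintf (path, sizeof (path), "/proc/%d/task", pid);
  dir = opendir (path);
  if (dir == NULL)
    return;

  while ((de = readdir (dir)) != NULL)
    if (de->d_name[0] != '.')
      attach_proc_task_lwp_callback (ptid_build (pid,
						 atoi (de->d_name), 0));
  closedir (dir);
}
#endif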

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
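
/* Illustrative sketch: a callback for iterate_over_lwps above,
   finding the first stopped LWP of a hypothetical process 1234;
   example only, not built:  */
#if 0
static int
first_stopped_cb (struct lwp_info *lwp, void *data)
{
  return lwp->stopped;	/* Nonzero stops the iteration.  */
}

static void
iterate_example (void)
{
  struct lwp_info *lwp
    = iterate_over_lwps (pid_to_ptid (1234), first_stopped_cb, NULL);

  /* LWP is the first stopped LWP whose ptid matches the filter, or
     NULL if none matched.  */
}
#endif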

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* We resume in the caller because we want to be able to pass
   lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
1930
1931 /* Dequeue one signal from the "signals to report later when out of
1932 the jump pad" list. */
1933
1934 static int
1935 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1936 {
1937 struct thread_info *thread = get_lwp_thread (lwp);
1938
1939 if (lwp->pending_signals_to_report != NULL)
1940 {
1941 struct pending_signals **p_sig;
1942
1943 p_sig = &lwp->pending_signals_to_report;
1944 while ((*p_sig)->prev != NULL)
1945 p_sig = &(*p_sig)->prev;
1946
1947 *wstat = W_STOPCODE ((*p_sig)->signal);
1948 if ((*p_sig)->info.si_signo != 0)
1949 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1950 &(*p_sig)->info);
1951 free (*p_sig);
1952 *p_sig = NULL;
1953
1954 if (debug_threads)
1955 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1956 WSTOPSIG (*wstat), lwpid_of (thread));
1957
1958 if (debug_threads)
1959 {
1960 struct pending_signals *sig;
1961
1962 for (sig = lwp->pending_signals_to_report;
1963 sig != NULL;
1964 sig = sig->prev)
1965 debug_printf (" Still queued %d\n",
1966 sig->signal);
1967
1968 debug_printf (" (no more queued signals)\n");
1969 }
1970
1971 return 1;
1972 }
1973
1974 return 0;
1975 }
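
/* Illustrative sketch, not part of gdbserver: the deferred-signal
   queue above is a singly linked list whose PREV pointers run from
   the newest entry back to the oldest, so enqueuing pushes at the
   head while dequeuing walks to the tail, and signals come back out
   in FIFO order.  A minimal stand-alone model of the same discipline
   (struct and function names invented; error handling omitted):

     struct sig_node { struct sig_node *prev; int signal; };

     static int
     dequeue_oldest (struct sig_node **head)
     {
       struct sig_node **p = head;
       int sig;

       while ((*p)->prev != NULL)
         p = &(*p)->prev;
       sig = (*p)->signal;
       free (*p);
       *p = NULL;
       return sig;
     }
*/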
1976
1977 /* Fetch the possibly triggered data watchpoint info and store it in
1978 CHILD.
1979
1980 On some archs, like x86, that use debug registers to set
1981 watchpoints, the only way to know which watched address trapped
1982 is to read back the register that selects which address to
1983 watch. The problem is that between setting the watchpoint and
1984 reading back which data address trapped, the user may change the
1985 set of watchpoints, and, as a consequence, GDB changes the
1986 debug registers in the inferior. To avoid reading back a stale
1987 stopped-data-address when that happens, we cache in CHILD the fact
1988 that a watchpoint trapped, and the corresponding data address, as
1989 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1990 registers meanwhile, we still have the cached data to rely on. */
1991
1992 static int
1993 check_stopped_by_watchpoint (struct lwp_info *child)
1994 {
1995 if (the_low_target.stopped_by_watchpoint != NULL)
1996 {
1997 struct thread_info *saved_thread;
1998
1999 saved_thread = current_thread;
2000 current_thread = get_lwp_thread (child);
2001
2002 if (the_low_target.stopped_by_watchpoint ())
2003 {
2004 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2005
2006 if (the_low_target.stopped_data_address != NULL)
2007 child->stopped_data_address
2008 = the_low_target.stopped_data_address ();
2009 else
2010 child->stopped_data_address = 0;
2011 }
2012
2013 current_thread = saved_thread;
2014 }
2015
2016 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2017 }
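
/* Illustrative usage sketch, not part of gdbserver
   (report_watchpoint_hit is invented for illustration; the real
   consumer is the stop-reply code that tells GDB "watch:ADDR"):
   once the stop reason and data address are cached here, they
   survive any later debug-register rewrite by GDB, so a caller can
   simply do:

     if (check_stopped_by_watchpoint (child))
       report_watchpoint_hit (child->stopped_data_address);
*/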
2018
2019 /* Return the ptrace options that we want to try to enable. */
2020
2021 static int
2022 linux_low_ptrace_options (int attached)
2023 {
2024 int options = 0;
2025
2026 if (!attached)
2027 options |= PTRACE_O_EXITKILL;
2028
2029 if (report_fork_events)
2030 options |= PTRACE_O_TRACEFORK;
2031
2032 if (report_vfork_events)
2033 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2034
2035 return options;
2036 }
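
/* Illustrative sketch, not part of gdbserver: the mask built above
   is handed to linux_enable_event_reporting (called further down,
   once the LWP has stopped) and reaches the kernel via
   PTRACE_SETOPTIONS.  For a child we launched ourselves (not
   attached) with fork event reporting enabled, that boils down to
   roughly:

     if (ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
                 (PTRACE_TYPE_ARG4) (PTRACE_O_EXITKILL
                                     | PTRACE_O_TRACEFORK)) != 0)
       perror ("PTRACE_SETOPTIONS");

   PTRACE_O_EXITKILL makes the kernel kill the tracee if gdbserver
   itself dies, which is only desirable for processes gdbserver
   spawned, hence the !attached check above. */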
2037
2038 /* Do low-level handling of the event, and check if we should go on
2039 and pass it on to the caller. Return the affected LWP if we
2040 should, or NULL otherwise. */
2041
2042 static struct lwp_info *
2043 linux_low_filter_event (int lwpid, int wstat)
2044 {
2045 struct lwp_info *child;
2046 struct thread_info *thread;
2047 int have_stop_pc = 0;
2048
2049 child = find_lwp_pid (pid_to_ptid (lwpid));
2050
2051 /* If we didn't find a process, one of two things presumably happened:
2052 - A process we started and then detached from has exited. Ignore it.
2053 - A process we are controlling has forked and the new child's stop
2054 was reported to us by the kernel. Save its PID. */
2055 if (child == NULL && WIFSTOPPED (wstat))
2056 {
2057 add_to_pid_list (&stopped_pids, lwpid, wstat);
2058 return NULL;
2059 }
2060 else if (child == NULL)
2061 return NULL;
2062
2063 thread = get_lwp_thread (child);
2064
2065 child->stopped = 1;
2066
2067 child->last_status = wstat;
2068
2069 /* Check if the thread has exited. */
2070 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2071 {
2072 if (debug_threads)
2073 debug_printf ("LLFE: %d exited.\n", lwpid);
2074 if (num_lwps (pid_of (thread)) > 1)
2075 {
2077 /* If there is at least one more LWP, then the exit signal was
2078 not the end of the debugged application and should be
2079 ignored. */
2080 delete_lwp (child);
2081 return NULL;
2082 }
2083 else
2084 {
2085 /* This was the last lwp in the process. Events are
2086 serialized to the GDB core, so we can't report this one
2087 right now; but the core and the other target layers will
2088 want to be notified of the exit code/signal. Leave the
2089 status pending for the next time we're able to report
2090 it. */
2091 mark_lwp_dead (child, wstat);
2092 return child;
2093 }
2094 }
2095
2096 gdb_assert (WIFSTOPPED (wstat));
2097
2098 if (WIFSTOPPED (wstat))
2099 {
2100 struct process_info *proc;
2101
2102 /* Architecture-specific setup after the inferior is running. This
2103 needs to happen after we have attached to the inferior and it
2104 is stopped for the first time, but before we access any
2105 inferior registers. */
2106 proc = find_process_pid (pid_of (thread));
2107 if (proc->priv->new_inferior)
2108 {
2109 struct thread_info *saved_thread;
2110
2111 saved_thread = current_thread;
2112 current_thread = thread;
2113
2114 the_low_target.arch_setup ();
2115
2116 current_thread = saved_thread;
2117
2118 proc->priv->new_inferior = 0;
2119 }
2120 }
2121
2122 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2123 {
2124 struct process_info *proc = find_process_pid (pid_of (thread));
2125 int options = linux_low_ptrace_options (proc->attached);
2126
2127 linux_enable_event_reporting (lwpid, options);
2128 child->must_set_ptrace_flags = 0;
2129 }
2130
2131 /* Be careful to not overwrite stop_pc until
2132 check_stopped_by_breakpoint is called. */
2133 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2134 && linux_is_extended_waitstatus (wstat))
2135 {
2136 child->stop_pc = get_pc (child);
2137 if (handle_extended_wait (child, wstat))
2138 {
2139 /* The event has been handled, so just return without
2140 reporting it. */
2141 return NULL;
2142 }
2143 }
2144
2145 /* Check first whether this was a SW/HW breakpoint before checking
2146 watchpoints, because at least s390 can't tell the data address of
2147 hardware watchpoint hits, and returns stopped-by-watchpoint as
2148 long as there's a watchpoint set. */
2149 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2150 {
2151 if (check_stopped_by_breakpoint (child))
2152 have_stop_pc = 1;
2153 }
2154
2155 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2156 or hardware watchpoint. Check which is which if we got
2157 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2158 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2159 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2160 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2161 check_stopped_by_watchpoint (child);
2162
2163 if (!have_stop_pc)
2164 child->stop_pc = get_pc (child);
2165
2166 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2167 && child->stop_expected)
2168 {
2169 if (debug_threads)
2170 debug_printf ("Expected stop.\n");
2171 child->stop_expected = 0;
2172
2173 if (thread->last_resume_kind == resume_stop)
2174 {
2175 /* We want to report the stop to the core. Treat the
2176 SIGSTOP as a normal event. */
2177 if (debug_threads)
2178 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2179 target_pid_to_str (ptid_of (thread)));
2180 }
2181 else if (stopping_threads != NOT_STOPPING_THREADS)
2182 {
2183 /* Stopping threads. We don't want this SIGSTOP to end up
2184 pending. */
2185 if (debug_threads)
2186 debug_printf ("LLW: SIGSTOP caught for %s "
2187 "while stopping threads.\n",
2188 target_pid_to_str (ptid_of (thread)));
2189 return NULL;
2190 }
2191 else
2192 {
2193 /* This is a delayed SIGSTOP. Filter out the event. */
2194 if (debug_threads)
2195 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2196 child->stepping ? "step" : "continue",
2197 target_pid_to_str (ptid_of (thread)));
2198
2199 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2200 return NULL;
2201 }
2202 }
2203
2204 child->status_pending_p = 1;
2205 child->status_pending = wstat;
2206 return child;
2207 }
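
/* Illustrative note, not part of gdbserver: the WSTAT filtered above
   is a raw waitpid status word, and the same encoding is synthesized
   by hand elsewhere in this file (dequeue_one_deferred_signal,
   linux_stabilize_threads) via W_STOPCODE, so the following always
   holds:

     int wstat = W_STOPCODE (SIGTRAP);

     assert (WIFSTOPPED (wstat));
     assert (WSTOPSIG (wstat) == SIGTRAP);

   That is why synthesized statuses can be stored in the same
   status_pending slot as statuses reported by the kernel. */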
2208
2209 /* Resume LWPs that are currently stopped without any pending status
2210 to report, but are resumed from the core's perspective. */
2211
2212 static void
2213 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2214 {
2215 struct thread_info *thread = (struct thread_info *) entry;
2216 struct lwp_info *lp = get_thread_lwp (thread);
2217
2218 if (lp->stopped
2219 && !lp->status_pending_p
2220 && thread->last_resume_kind != resume_stop
2221 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2222 {
2223 int step = thread->last_resume_kind == resume_step;
2224
2225 if (debug_threads)
2226 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2227 target_pid_to_str (ptid_of (thread)),
2228 paddress (lp->stop_pc),
2229 step);
2230
2231 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2232 }
2233 }
2234
2235 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2236 match FILTER_PTID (leaving others pending). The PTIDs can be:
2237 minus_one_ptid, to specify any child; a pid PTID, specifying all
2238 lwps of a thread group; or a PTID representing a single lwp. Store
2239 the stop status through the status pointer WSTAT. OPTIONS is
2240 passed to the waitpid call. Return 0 if no event was found and
2241 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2242 was found. Return the PID of the stopped child otherwise. */
2243
2244 static int
2245 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2246 int *wstatp, int options)
2247 {
2248 struct thread_info *event_thread;
2249 struct lwp_info *event_child, *requested_child;
2250 sigset_t block_mask, prev_mask;
2251
2252 retry:
2253 /* N.B. event_thread points to the thread_info struct that contains
2254 event_child. Keep them in sync. */
2255 event_thread = NULL;
2256 event_child = NULL;
2257 requested_child = NULL;
2258
2259 /* Check for a lwp with a pending status. */
2260
2261 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2262 {
2263 event_thread = (struct thread_info *)
2264 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2265 if (event_thread != NULL)
2266 event_child = get_thread_lwp (event_thread);
2267 if (debug_threads && event_thread)
2268 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2269 }
2270 else if (!ptid_equal (filter_ptid, null_ptid))
2271 {
2272 requested_child = find_lwp_pid (filter_ptid);
2273
2274 if (stopping_threads == NOT_STOPPING_THREADS
2275 && requested_child->status_pending_p
2276 && requested_child->collecting_fast_tracepoint)
2277 {
2278 enqueue_one_deferred_signal (requested_child,
2279 &requested_child->status_pending);
2280 requested_child->status_pending_p = 0;
2281 requested_child->status_pending = 0;
2282 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2283 }
2284
2285 if (requested_child->suspended
2286 && requested_child->status_pending_p)
2287 {
2288 internal_error (__FILE__, __LINE__,
2289 "requesting an event out of a"
2290 " suspended child?");
2291 }
2292
2293 if (requested_child->status_pending_p)
2294 {
2295 event_child = requested_child;
2296 event_thread = get_lwp_thread (event_child);
2297 }
2298 }
2299
2300 if (event_child != NULL)
2301 {
2302 if (debug_threads)
2303 debug_printf ("Got an event from pending child %ld (%04x)\n",
2304 lwpid_of (event_thread), event_child->status_pending);
2305 *wstatp = event_child->status_pending;
2306 event_child->status_pending_p = 0;
2307 event_child->status_pending = 0;
2308 current_thread = event_thread;
2309 return lwpid_of (event_thread);
2310 }
2311
2312 /* But if we don't find a pending event, we'll have to wait.
2313
2314 We only enter this loop if no process has a pending wait status.
2315 Thus any action taken in response to a wait status inside this
2316 loop is responding as soon as we detect the status, not after any
2317 pending events. */
2318
2319 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2320 all signals while here. */
2321 sigfillset (&block_mask);
2322 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2323
2324 /* Always pull all events out of the kernel. We'll randomly select
2325 an event LWP out of all that have events, to prevent
2326 starvation. */
2327 while (event_child == NULL)
2328 {
2329 pid_t ret = 0;
2330
2331 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2332 quirks:
2333
2334 - If the thread group leader exits while other threads in the
2335 thread group still exist, waitpid(TGID, ...) hangs. That
2336 waitpid won't return an exit status until the other threads
2337 in the group are reaped.
2338
2339 - When a non-leader thread execs, that thread just vanishes
2340 without reporting an exit (so we'd hang if we waited for it
2341 explicitly in that case). The exec event is reported to
2342 the TGID pid (although we don't currently enable exec
2343 events). */
2344 errno = 0;
2345 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2346
2347 if (debug_threads)
2348 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2349 ret, errno ? strerror (errno) : "ERRNO-OK");
2350
2351 if (ret > 0)
2352 {
2353 if (debug_threads)
2354 {
2355 debug_printf ("LLW: waitpid %ld received %s\n",
2356 (long) ret, status_to_str (*wstatp));
2357 }
2358
2359 /* Filter all events. IOW, leave all events pending. We'll
2360 randomly select an event LWP out of all that have events
2361 below. */
2362 linux_low_filter_event (ret, *wstatp);
2363 /* Retry until nothing comes out of waitpid. A single
2364 SIGCHLD can indicate more than one child stopped. */
2365 continue;
2366 }
2367
2368 /* Now that we've pulled all events out of the kernel, resume
2369 LWPs that don't have an interesting event to report. */
2370 if (stopping_threads == NOT_STOPPING_THREADS)
2371 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2372
2373 /* ... and find an LWP with a status to report to the core, if
2374 any. */
2375 event_thread = (struct thread_info *)
2376 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2377 if (event_thread != NULL)
2378 {
2379 event_child = get_thread_lwp (event_thread);
2380 *wstatp = event_child->status_pending;
2381 event_child->status_pending_p = 0;
2382 event_child->status_pending = 0;
2383 break;
2384 }
2385
2386 /* Check for zombie thread group leaders. Those can't be reaped
2387 until all other threads in the thread group are. */
2388 check_zombie_leaders ();
2389
2390 /* If there are no resumed children left in the set of LWPs we
2391 want to wait for, bail. We can't just block in
2392 waitpid/sigsuspend, because lwps might have been left stopped
2393 in trace-stop state, and we'd be stuck forever waiting for
2394 their status to change (which would only happen if we resumed
2395 them). Even if WNOHANG is set, this return code is preferred
2396 over 0 (below), as it is more detailed. */
2397 if ((find_inferior (&all_threads,
2398 not_stopped_callback,
2399 &wait_ptid) == NULL))
2400 {
2401 if (debug_threads)
2402 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2403 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2404 return -1;
2405 }
2406
2407 /* No interesting event to report to the caller. */
2408 if ((options & WNOHANG))
2409 {
2410 if (debug_threads)
2411 debug_printf ("WNOHANG set, no event found\n");
2412
2413 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2414 return 0;
2415 }
2416
2417 /* Block until we get an event reported with SIGCHLD. */
2418 if (debug_threads)
2419 debug_printf ("sigsuspend'ing\n");
2420
2421 sigsuspend (&prev_mask);
2422 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2423 goto retry;
2424 }
2425
2426 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2427
2428 current_thread = event_thread;
2429
2430 /* Check for thread exit. */
2431 if (! WIFSTOPPED (*wstatp))
2432 {
2433 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2434
2435 if (debug_threads)
2436 debug_printf ("LWP %d is the last lwp of process. "
2437 "Process %ld exiting.\n",
2438 pid_of (event_thread), lwpid_of (event_thread));
2439 return lwpid_of (event_thread);
2440 }
2441
2442 return lwpid_of (event_thread);
2443 }
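
/* Illustrative sketch, not part of gdbserver: the blocking path above
   is the classic race-free SIGCHLD wait.  All signals are blocked
   before the non-blocking waitpid pass, and sigsuspend atomically
   swaps the old mask back in while sleeping, so a SIGCHLD delivered
   between the two cannot be lost.  Reduced to a skeleton (ECHILD and
   error handling omitted):

     sigset_t block_mask, prev_mask;
     int status;

     sigfillset (&block_mask);
     sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
     while (waitpid (-1, &status, WNOHANG) == 0)
       sigsuspend (&prev_mask);
     sigprocmask (SIG_SETMASK, &prev_mask, NULL);
*/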
2444
2445 /* Wait for an event from child(ren) PTID. PTIDs can be:
2446 minus_one_ptid, to specify any child; a pid PTID, specifying all
2447 lwps of a thread group; or a PTID representing a single lwp. Store
2448 the stop status through the status pointer WSTAT. OPTIONS is
2449 passed to the waitpid call. Return 0 if no event was found and
2450 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2451 was found. Return the PID of the stopped child otherwise. */
2452
2453 static int
2454 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2455 {
2456 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2457 }
2458
2459 /* Count the LWPs that have had events. */
2460
2461 static int
2462 count_events_callback (struct inferior_list_entry *entry, void *data)
2463 {
2464 struct thread_info *thread = (struct thread_info *) entry;
2465 struct lwp_info *lp = get_thread_lwp (thread);
2466 int *count = data;
2467
2468 gdb_assert (count != NULL);
2469
2470 /* Count only resumed LWPs that have an event pending. */
2471 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2472 && lp->status_pending_p)
2473 (*count)++;
2474
2475 return 0;
2476 }
2477
2478 /* Select the LWP (if any) that is currently being single-stepped. */
2479
2480 static int
2481 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2482 {
2483 struct thread_info *thread = (struct thread_info *) entry;
2484 struct lwp_info *lp = get_thread_lwp (thread);
2485
2486 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2487 && thread->last_resume_kind == resume_step
2488 && lp->status_pending_p)
2489 return 1;
2490 else
2491 return 0;
2492 }
2493
2494 /* Select the Nth LWP that has had an event. */
2495
2496 static int
2497 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2498 {
2499 struct thread_info *thread = (struct thread_info *) entry;
2500 struct lwp_info *lp = get_thread_lwp (thread);
2501 int *selector = data;
2502
2503 gdb_assert (selector != NULL);
2504
2505 /* Select only resumed LWPs that have an event pending. */
2506 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2507 && lp->status_pending_p)
2508 if ((*selector)-- == 0)
2509 return 1;
2510
2511 return 0;
2512 }
2513
2514 /* Select one LWP out of those that have events pending. */
2515
2516 static void
2517 select_event_lwp (struct lwp_info **orig_lp)
2518 {
2519 int num_events = 0;
2520 int random_selector;
2521 struct thread_info *event_thread = NULL;
2522
2523 /* In all-stop, give preference to the LWP that is being
2524 single-stepped. There will be at most one, and it's the LWP that
2525 the core is most interested in. If we didn't do this, then we'd
2526 have to handle pending step SIGTRAPs somehow in case the core
2527 later continues the previously-stepped thread, otherwise we'd
2528 report the pending SIGTRAP, and the core, not having stepped the
2529 thread, wouldn't understand what the trap was for, and therefore
2530 would report it to the user as a random signal. */
2531 if (!non_stop)
2532 {
2533 event_thread
2534 = (struct thread_info *) find_inferior (&all_threads,
2535 select_singlestep_lwp_callback,
2536 NULL);
2537 if (event_thread != NULL)
2538 {
2539 if (debug_threads)
2540 debug_printf ("SEL: Select single-step %s\n",
2541 target_pid_to_str (ptid_of (event_thread)));
2542 }
2543 }
2544 if (event_thread == NULL)
2545 {
2546 /* No single-stepping LWP. Select one at random, out of those
2547 which have had events. */
2548
2549 /* First see how many events we have. */
2550 find_inferior (&all_threads, count_events_callback, &num_events);
2551 gdb_assert (num_events > 0);
2552
2553 /* Now randomly pick a LWP out of those that have had
2554 events. */
2555 random_selector = (int)
2556 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2557
2558 if (debug_threads && num_events > 1)
2559 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2560 num_events, random_selector);
2561
2562 event_thread
2563 = (struct thread_info *) find_inferior (&all_threads,
2564 select_event_lwp_callback,
2565 &random_selector);
2566 }
2567
2568 if (event_thread != NULL)
2569 {
2570 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2571
2572 /* Switch the event LWP. */
2573 *orig_lp = event_lp;
2574 }
2575 }
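
/* Illustrative note, not part of gdbserver: the random selector used
   above maps rand () uniformly onto [0, num_events).  Since
   0 <= rand () <= RAND_MAX < RAND_MAX + 1.0, we have

     0 <= (num_events * (double) rand ()) / (RAND_MAX + 1.0)
        < num_events

   so truncating to int always yields a valid zero-based index.
   select_event_lwp_callback then consumes that index by decrementing
   it once per resumed LWP with a pending event until it reaches
   zero. */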
2576
2577 /* Decrement the suspend count of an LWP. */
2578
2579 static int
2580 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2581 {
2582 struct thread_info *thread = (struct thread_info *) entry;
2583 struct lwp_info *lwp = get_thread_lwp (thread);
2584
2585 /* Ignore EXCEPT. */
2586 if (lwp == except)
2587 return 0;
2588
2589 lwp->suspended--;
2590
2591 gdb_assert (lwp->suspended >= 0);
2592 return 0;
2593 }
2594
2595 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2596 non-NULL. */
2597
2598 static void
2599 unsuspend_all_lwps (struct lwp_info *except)
2600 {
2601 find_inferior (&all_threads, unsuspend_one_lwp, except);
2602 }
2603
2604 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2605 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2606 void *data);
2607 static int lwp_running (struct inferior_list_entry *entry, void *data);
2608 static ptid_t linux_wait_1 (ptid_t ptid,
2609 struct target_waitstatus *ourstatus,
2610 int target_options);
2611
2612 /* Stabilize threads (move out of jump pads).
2613
2614 If a thread is midway collecting a fast tracepoint, we need to
2615 finish the collection and move it out of the jump pad before
2616 reporting the signal.
2617
2618 This avoids recursion while collecting (when a signal arrives
2619 midway, and the signal handler itself collects), which would trash
2620 the trace buffer. In case the user set a breakpoint in a signal
2621 handler, this avoids the backtrace showing the jump pad, etc..
2622 Most importantly, there are certain things we can't do safely if
2623 threads are stopped in a jump pad (or in its callees). For
2624 example:
2625
2626 - starting a new trace run. A thread still collecting the
2627 previous run could trash the trace buffer when resumed. The trace
2628 buffer control structures would have been reset but the thread had
2629 no way to tell. The thread could even be midway through memcpy'ing
2630 into the buffer, which would mean that when resumed, it would
2631 clobber the trace buffer that had been set up for a new run.
2632
2633 - we can't rewrite/reuse the jump pads for new tracepoints
2634 safely. Say you do tstart while a thread is stopped midway through
2635 collecting. When the thread is later resumed, it finishes the
2636 collection, and returns to the jump pad, to execute the original
2637 instruction that was under the tracepoint jump at the time the
2638 older run had been started. If the jump pad has been rewritten
2639 since then for something else in the new run, the thread would now
2640 execute the wrong / random instructions. */
2641
2642 static void
2643 linux_stabilize_threads (void)
2644 {
2645 struct thread_info *saved_thread;
2646 struct thread_info *thread_stuck;
2647
2648 thread_stuck
2649 = (struct thread_info *) find_inferior (&all_threads,
2650 stuck_in_jump_pad_callback,
2651 NULL);
2652 if (thread_stuck != NULL)
2653 {
2654 if (debug_threads)
2655 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2656 lwpid_of (thread_stuck));
2657 return;
2658 }
2659
2660 saved_thread = current_thread;
2661
2662 stabilizing_threads = 1;
2663
2664 /* Kick 'em all. */
2665 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2666
2667 /* Loop until all are stopped out of the jump pads. */
2668 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2669 {
2670 struct target_waitstatus ourstatus;
2671 struct lwp_info *lwp;
2672 int wstat;
2673
2674 /* Note that we go through the full wait event loop. While
2675 moving threads out of the jump pad, we need to be able to step
2676 over internal breakpoints and such. */
2677 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2678
2679 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2680 {
2681 lwp = get_thread_lwp (current_thread);
2682
2683 /* Lock it. */
2684 lwp->suspended++;
2685
2686 if (ourstatus.value.sig != GDB_SIGNAL_0
2687 || current_thread->last_resume_kind == resume_stop)
2688 {
2689 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2690 enqueue_one_deferred_signal (lwp, &wstat);
2691 }
2692 }
2693 }
2694
2695 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2696
2697 stabilizing_threads = 0;
2698
2699 current_thread = saved_thread;
2700
2701 if (debug_threads)
2702 {
2703 thread_stuck
2704 = (struct thread_info *) find_inferior (&all_threads,
2705 stuck_in_jump_pad_callback,
2706 NULL);
2707 if (thread_stuck != NULL)
2708 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2709 lwpid_of (thread_stuck));
2710 }
2711 }
2712
2713 static void async_file_mark (void);
2714
2715 /* Convenience function that is called when the kernel reports an
2716 event that is not passed out to GDB. */
2717
2718 static ptid_t
2719 ignore_event (struct target_waitstatus *ourstatus)
2720 {
2721 /* If we got an event, there may still be others, as a single
2722 SIGCHLD can indicate more than one child stopped. This forces
2723 another target_wait call. */
2724 async_file_mark ();
2725
2726 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2727 return null_ptid;
2728 }
2729
2730 /* Return non-zero if WAITSTATUS reflects an extended Linux
2731 event. Otherwise, return zero. */
2732
2733 static int
2734 extended_event_reported (const struct target_waitstatus *waitstatus)
2735 {
2736 if (waitstatus == NULL)
2737 return 0;
2738
2739 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2740 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2741 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2742 }
2743
2744 /* Wait for the process; return its status. */
2745
2746 static ptid_t
2747 linux_wait_1 (ptid_t ptid,
2748 struct target_waitstatus *ourstatus, int target_options)
2749 {
2750 int w;
2751 struct lwp_info *event_child;
2752 int options;
2753 int pid;
2754 int step_over_finished;
2755 int bp_explains_trap;
2756 int maybe_internal_trap;
2757 int report_to_gdb;
2758 int trace_event;
2759 int in_step_range;
2760
2761 if (debug_threads)
2762 {
2763 debug_enter ();
2764 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2765 }
2766
2767 /* Translate generic target options into linux options. */
2768 options = __WALL;
2769 if (target_options & TARGET_WNOHANG)
2770 options |= WNOHANG;
2771
2772 bp_explains_trap = 0;
2773 trace_event = 0;
2774 in_step_range = 0;
2775 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2776
2777 if (ptid_equal (step_over_bkpt, null_ptid))
2778 pid = linux_wait_for_event (ptid, &w, options);
2779 else
2780 {
2781 if (debug_threads)
2782 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2783 target_pid_to_str (step_over_bkpt));
2784 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2785 }
2786
2787 if (pid == 0)
2788 {
2789 gdb_assert (target_options & TARGET_WNOHANG);
2790
2791 if (debug_threads)
2792 {
2793 debug_printf ("linux_wait_1 ret = null_ptid, "
2794 "TARGET_WAITKIND_IGNORE\n");
2795 debug_exit ();
2796 }
2797
2798 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2799 return null_ptid;
2800 }
2801 else if (pid == -1)
2802 {
2803 if (debug_threads)
2804 {
2805 debug_printf ("linux_wait_1 ret = null_ptid, "
2806 "TARGET_WAITKIND_NO_RESUMED\n");
2807 debug_exit ();
2808 }
2809
2810 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2811 return null_ptid;
2812 }
2813
2814 event_child = get_thread_lwp (current_thread);
2815
2816 /* linux_wait_for_event only returns an exit status for the last
2817 child of a process. Report it. */
2818 if (WIFEXITED (w) || WIFSIGNALED (w))
2819 {
2820 if (WIFEXITED (w))
2821 {
2822 ourstatus->kind = TARGET_WAITKIND_EXITED;
2823 ourstatus->value.integer = WEXITSTATUS (w);
2824
2825 if (debug_threads)
2826 {
2827 debug_printf ("linux_wait_1 ret = %s, exited with "
2828 "retcode %d\n",
2829 target_pid_to_str (ptid_of (current_thread)),
2830 WEXITSTATUS (w));
2831 debug_exit ();
2832 }
2833 }
2834 else
2835 {
2836 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2837 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2838
2839 if (debug_threads)
2840 {
2841 debug_printf ("linux_wait_1 ret = %s, terminated with "
2842 "signal %d\n",
2843 target_pid_to_str (ptid_of (current_thread)),
2844 WTERMSIG (w));
2845 debug_exit ();
2846 }
2847 }
2848
2849 return ptid_of (current_thread);
2850 }
2851
2852 /* If step-over executes a breakpoint instruction, it means a
2853 gdb/gdbserver breakpoint had been planted on top of a permanent
2854 breakpoint. The PC has been adjusted by
2855 check_stopped_by_breakpoint to point at the breakpoint address.
2856 Advance the PC manually past the breakpoint, otherwise the
2857 program would keep trapping the permanent breakpoint forever. */
2858 if (!ptid_equal (step_over_bkpt, null_ptid)
2859 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2860 {
2861 unsigned int increment_pc = the_low_target.breakpoint_len;
2862
2863 if (debug_threads)
2864 {
2865 debug_printf ("step-over for %s executed software breakpoint\n",
2866 target_pid_to_str (ptid_of (current_thread)));
2867 }
2868
2869 if (increment_pc != 0)
2870 {
2871 struct regcache *regcache
2872 = get_thread_regcache (current_thread, 1);
2873
2874 event_child->stop_pc += increment_pc;
2875 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2876
2877 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2878 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2879 }
2880 }
2881
2882 /* If this event was not handled before, and is not a SIGTRAP, we
2883 report it. SIGILL and SIGSEGV are also treated as traps in case
2884 a breakpoint is inserted at the current PC. If this target does
2885 not support internal breakpoints at all, we also report the
2886 SIGTRAP without further processing; it's of no concern to us. */
2887 maybe_internal_trap
2888 = (supports_breakpoints ()
2889 && (WSTOPSIG (w) == SIGTRAP
2890 || ((WSTOPSIG (w) == SIGILL
2891 || WSTOPSIG (w) == SIGSEGV)
2892 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2893
2894 if (maybe_internal_trap)
2895 {
2896 /* Handle anything that requires bookkeeping before deciding to
2897 report the event or continue waiting. */
2898
2899 /* First check if we can explain the SIGTRAP with an internal
2900 breakpoint, or if we should possibly report the event to GDB.
2901 Do this before anything that may remove or insert a
2902 breakpoint. */
2903 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2904
2905 /* We have a SIGTRAP, possibly a step-over dance has just
2906 finished. If so, tweak the state machine accordingly,
2907 reinsert breakpoints and delete any reinsert (software
2908 single-step) breakpoints. */
2909 step_over_finished = finish_step_over (event_child);
2910
2911 /* Now invoke the callbacks of any internal breakpoints there. */
2912 check_breakpoints (event_child->stop_pc);
2913
2914 /* Handle tracepoint data collecting. This may overflow the
2915 trace buffer, and cause a tracing stop, removing
2916 breakpoints. */
2917 trace_event = handle_tracepoints (event_child);
2918
2919 if (bp_explains_trap)
2920 {
2921 /* If we stepped or ran into an internal breakpoint, we've
2922 already handled it. So next time we resume (from this
2923 PC), we should step over it. */
2924 if (debug_threads)
2925 debug_printf ("Hit a gdbserver breakpoint.\n");
2926
2927 if (breakpoint_here (event_child->stop_pc))
2928 event_child->need_step_over = 1;
2929 }
2930 }
2931 else
2932 {
2933 /* We have some other signal, possibly a step-over dance was in
2934 progress, and it should be cancelled too. */
2935 step_over_finished = finish_step_over (event_child);
2936 }
2937
2938 /* We have all the data we need. Either report the event to GDB, or
2939 resume threads and keep waiting for more. */
2940
2941 /* If we're collecting a fast tracepoint, finish the collection and
2942 move out of the jump pad before delivering a signal. See
2943 linux_stabilize_threads. */
2944
2945 if (WIFSTOPPED (w)
2946 && WSTOPSIG (w) != SIGTRAP
2947 && supports_fast_tracepoints ()
2948 && agent_loaded_p ())
2949 {
2950 if (debug_threads)
2951 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2952 "to defer or adjust it.\n",
2953 WSTOPSIG (w), lwpid_of (current_thread));
2954
2955 /* Allow debugging the jump pad itself. */
2956 if (current_thread->last_resume_kind != resume_step
2957 && maybe_move_out_of_jump_pad (event_child, &w))
2958 {
2959 enqueue_one_deferred_signal (event_child, &w);
2960
2961 if (debug_threads)
2962 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2963 WSTOPSIG (w), lwpid_of (current_thread));
2964
2965 linux_resume_one_lwp (event_child, 0, 0, NULL);
2966
2967 return ignore_event (ourstatus);
2968 }
2969 }
2970
2971 if (event_child->collecting_fast_tracepoint)
2972 {
2973 if (debug_threads)
2974 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2975 "Check if we're already there.\n",
2976 lwpid_of (current_thread),
2977 event_child->collecting_fast_tracepoint);
2978
2979 trace_event = 1;
2980
2981 event_child->collecting_fast_tracepoint
2982 = linux_fast_tracepoint_collecting (event_child, NULL);
2983
2984 if (event_child->collecting_fast_tracepoint != 1)
2985 {
2986 /* No longer need this breakpoint. */
2987 if (event_child->exit_jump_pad_bkpt != NULL)
2988 {
2989 if (debug_threads)
2990 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2991 "stopping all threads momentarily.\n");
2992
2993 /* Other running threads could hit this breakpoint.
2994 We don't handle moribund locations like GDB does,
2995 instead we always pause all threads when removing
2996 breakpoints, so that any step-over or
2997 decr_pc_after_break adjustment is always taken
2998 care of while the breakpoint is still
2999 inserted. */
3000 stop_all_lwps (1, event_child);
3001
3002 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3003 event_child->exit_jump_pad_bkpt = NULL;
3004
3005 unstop_all_lwps (1, event_child);
3006
3007 gdb_assert (event_child->suspended >= 0);
3008 }
3009 }
3010
3011 if (event_child->collecting_fast_tracepoint == 0)
3012 {
3013 if (debug_threads)
3014 debug_printf ("fast tracepoint finished "
3015 "collecting successfully.\n");
3016
3017 /* We may have a deferred signal to report. */
3018 if (dequeue_one_deferred_signal (event_child, &w))
3019 {
3020 if (debug_threads)
3021 debug_printf ("dequeued one signal.\n");
3022 }
3023 else
3024 {
3025 if (debug_threads)
3026 debug_printf ("no deferred signals.\n");
3027
3028 if (stabilizing_threads)
3029 {
3030 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3031 ourstatus->value.sig = GDB_SIGNAL_0;
3032
3033 if (debug_threads)
3034 {
3035 debug_printf ("linux_wait_1 ret = %s, stopped "
3036 "while stabilizing threads\n",
3037 target_pid_to_str (ptid_of (current_thread)));
3038 debug_exit ();
3039 }
3040
3041 return ptid_of (current_thread);
3042 }
3043 }
3044 }
3045 }
3046
3047 /* Check whether GDB would be interested in this event. */
3048
3049 /* If GDB is not interested in this signal, don't stop other
3050 threads, and don't report it to GDB. Just resume the inferior
3051 right away. We do this for threading-related signals as well as
3052 any that GDB specifically requested we ignore. But never ignore
3053 SIGSTOP if we sent it ourselves, and do not ignore signals when
3054 stepping - they may require special handling to skip the signal
3055 handler. Also never ignore signals that could be caused by a
3056 breakpoint. */
3057 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3058 thread library? */
3059 if (WIFSTOPPED (w)
3060 && current_thread->last_resume_kind != resume_step
3061 && (
3062 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3063 (current_process ()->priv->thread_db != NULL
3064 && (WSTOPSIG (w) == __SIGRTMIN
3065 || WSTOPSIG (w) == __SIGRTMIN + 1))
3066 ||
3067 #endif
3068 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3069 && !(WSTOPSIG (w) == SIGSTOP
3070 && current_thread->last_resume_kind == resume_stop)
3071 && !linux_wstatus_maybe_breakpoint (w))))
3072 {
3073 siginfo_t info, *info_p;
3074
3075 if (debug_threads)
3076 debug_printf ("Ignored signal %d for LWP %ld.\n",
3077 WSTOPSIG (w), lwpid_of (current_thread));
3078
3079 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3080 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3081 info_p = &info;
3082 else
3083 info_p = NULL;
3084 linux_resume_one_lwp (event_child, event_child->stepping,
3085 WSTOPSIG (w), info_p);
3086 return ignore_event (ourstatus);
3087 }
3088
3089 /* Note that all addresses are always "out of the step range" when
3090 there's no range to begin with. */
3091 in_step_range = lwp_in_step_range (event_child);
3092
3093 /* If GDB wanted this thread to single step, and the thread is out
3094 of the step range, we always want to report the SIGTRAP, and let
3095 GDB handle it. Watchpoints should always be reported. So should
3096 signals we can't explain. A SIGTRAP we can't explain could be a
3097 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3098 we do, we'll be able to handle GDB breakpoints on top of internal
3099 breakpoints, by handling the internal breakpoint and still
3100 reporting the event to GDB. If we don't, we're out of luck, GDB
3101 won't see the breakpoint hit. */
3102 report_to_gdb = (!maybe_internal_trap
3103 || (current_thread->last_resume_kind == resume_step
3104 && !in_step_range)
3105 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3106 || (!step_over_finished && !in_step_range
3107 && !bp_explains_trap && !trace_event)
3108 || (gdb_breakpoint_here (event_child->stop_pc)
3109 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3110 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3111 || extended_event_reported (&event_child->waitstatus));
3112
3113 run_breakpoint_commands (event_child->stop_pc);
3114
3115 /* We found no reason GDB would want us to stop. We either hit one
3116 of our own breakpoints, or finished an internal step GDB
3117 shouldn't know about. */
3118 if (!report_to_gdb)
3119 {
3120 if (debug_threads)
3121 {
3122 if (bp_explains_trap)
3123 debug_printf ("Hit a gdbserver breakpoint.\n");
3124 if (step_over_finished)
3125 debug_printf ("Step-over finished.\n");
3126 if (trace_event)
3127 debug_printf ("Tracepoint event.\n");
3128 if (lwp_in_step_range (event_child))
3129 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3130 paddress (event_child->stop_pc),
3131 paddress (event_child->step_range_start),
3132 paddress (event_child->step_range_end));
3133 if (extended_event_reported (&event_child->waitstatus))
3134 {
3135 char *str = target_waitstatus_to_string (ourstatus);
3136 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3137 lwpid_of (get_lwp_thread (event_child)), str);
3138 xfree (str);
3139 }
3140 }
3141
3142 /* We're not reporting this breakpoint to GDB, so apply the
3143 decr_pc_after_break adjustment to the inferior's regcache
3144 ourselves. */
3145
3146 if (the_low_target.set_pc != NULL)
3147 {
3148 struct regcache *regcache
3149 = get_thread_regcache (current_thread, 1);
3150 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3151 }
3152
3153 /* We may have finished stepping over a breakpoint. If so,
3154 we've stopped and suspended all LWPs momentarily except the
3155 stepping one. This is where we resume them all again. We're
3156 going to keep waiting, so use proceed, which handles stepping
3157 over the next breakpoint. */
3158 if (debug_threads)
3159 debug_printf ("proceeding all threads.\n");
3160
3161 if (step_over_finished)
3162 unsuspend_all_lwps (event_child);
3163
3164 proceed_all_lwps ();
3165 return ignore_event (ourstatus);
3166 }
3167
3168 if (debug_threads)
3169 {
3170 if (current_thread->last_resume_kind == resume_step)
3171 {
3172 if (event_child->step_range_start == event_child->step_range_end)
3173 debug_printf ("GDB wanted to single-step, reporting event.\n");
3174 else if (!lwp_in_step_range (event_child))
3175 debug_printf ("Out of step range, reporting event.\n");
3176 }
3177 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3178 debug_printf ("Stopped by watchpoint.\n");
3179 else if (gdb_breakpoint_here (event_child->stop_pc))
3180 debug_printf ("Stopped by GDB breakpoint.\n");
3181 debug_printf ("Hit a non-gdbserver trap event.\n");
3183 }
3184
3185 /* Alright, we're going to report a stop. */
3186
3187 if (!stabilizing_threads)
3188 {
3189 /* In all-stop, stop all threads. */
3190 if (!non_stop)
3191 stop_all_lwps (0, NULL);
3192
3193 /* If we're not waiting for a specific LWP, choose an event LWP
3194 from among those that have had events. Giving equal priority
3195 to all LWPs that have had events helps prevent
3196 starvation. */
3197 if (ptid_equal (ptid, minus_one_ptid))
3198 {
3199 event_child->status_pending_p = 1;
3200 event_child->status_pending = w;
3201
3202 select_event_lwp (&event_child);
3203
3204 /* current_thread and event_child must stay in sync. */
3205 current_thread = get_lwp_thread (event_child);
3206
3207 event_child->status_pending_p = 0;
3208 w = event_child->status_pending;
3209 }
3210
3211 if (step_over_finished)
3212 {
3213 if (!non_stop)
3214 {
3215 /* If we were doing a step-over, all other threads but
3216 the stepping one had been paused in start_step_over,
3217 with their suspend counts incremented. We don't want
3218 to do a full unstop/unpause, because we're in
3219 all-stop mode (so we want threads stopped), but we
3220 still need to unsuspend the other threads, to
3221 decrement their `suspended' count back. */
3222 unsuspend_all_lwps (event_child);
3223 }
3224 else
3225 {
3226 /* If we just finished a step-over, then all threads had
3227 been momentarily paused. In all-stop, that's fine,
3228 we want threads stopped by now anyway. In non-stop,
3229 we need to re-resume threads that GDB wanted to be
3230 running. */
3231 unstop_all_lwps (1, event_child);
3232 }
3233 }
3234
3235 /* Stabilize threads (move out of jump pads). */
3236 if (!non_stop)
3237 stabilize_threads ();
3238 }
3239 else
3240 {
3241 /* If we just finished a step-over, then all threads had been
3242 momentarily paused. In all-stop, that's fine, we want
3243 threads stopped by now anyway. In non-stop, we need to
3244 re-resume threads that GDB wanted to be running. */
3245 if (step_over_finished)
3246 unstop_all_lwps (1, event_child);
3247 }
3248
3249 if (extended_event_reported (&event_child->waitstatus))
3250 {
3251 /* If the reported event is a fork, vfork or vfork-done, let GDB know. */
3252 ourstatus->kind = event_child->waitstatus.kind;
3253 ourstatus->value = event_child->waitstatus.value;
3254
3255 /* Clear the event lwp's waitstatus since we handled it already. */
3256 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3257 }
3258 else
3259 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3260
3261 /* Now that we've selected our final event LWP, un-adjust its PC if
3262 it was a software breakpoint, and the client doesn't know we can
3263 adjust the breakpoint ourselves. */
3264 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3265 && !swbreak_feature)
3266 {
3267 int decr_pc = the_low_target.decr_pc_after_break;
3268
3269 if (decr_pc != 0)
3270 {
3271 struct regcache *regcache
3272 = get_thread_regcache (current_thread, 1);
3273 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3274 }
3275 }
3276
3277 if (current_thread->last_resume_kind == resume_stop
3278 && WSTOPSIG (w) == SIGSTOP)
3279 {
3280 /* A thread that has been requested to stop by GDB with vCont;t
3281 stopped cleanly; report it as SIG0. The use of SIGSTOP is an
3282 implementation detail. */
3283 ourstatus->value.sig = GDB_SIGNAL_0;
3284 }
3285 else if (current_thread->last_resume_kind == resume_stop
3286 && WSTOPSIG (w) != SIGSTOP)
3287 {
3288 /* A thread that has been requested to stop by GDB with vCont;t,
3289 but it stopped for some other reason. */
3290 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3291 }
3292 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3293 {
3294 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3295 }
3296
3297 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3298
3299 if (debug_threads)
3300 {
3301 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3302 target_pid_to_str (ptid_of (current_thread)),
3303 ourstatus->kind, ourstatus->value.sig);
3304 debug_exit ();
3305 }
3306
3307 return ptid_of (current_thread);
3308 }
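
/* Illustrative summary, not part of gdbserver: stripped of the
   step-over, tracepoint and signal-deferral machinery, linux_wait_1
   maps the raw status word onto the generic waitstatus like so:

     WIFEXITED (w)    ->  TARGET_WAITKIND_EXITED     (WEXITSTATUS (w))
     WIFSIGNALED (w)  ->  TARGET_WAITKIND_SIGNALLED  (WTERMSIG (w))
     WIFSTOPPED (w)   ->  TARGET_WAITKIND_STOPPED    (WSTOPSIG (w))

   with the stopped case rewritten to GDB_SIGNAL_0 when the stop was
   a SIGSTOP we requested ourselves via vCont;t, and with fork/vfork
   waitstatuses passed through from the event LWP. */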
3309
3310 /* Get rid of any pending event in the pipe. */
3311 static void
3312 async_file_flush (void)
3313 {
3314 int ret;
3315 char buf;
3316
3317 do
3318 ret = read (linux_event_pipe[0], &buf, 1);
3319 while (ret >= 0 || (ret == -1 && errno == EINTR));
3320 }
3321
3322 /* Put something in the pipe, so the event loop wakes up. */
3323 static void
3324 async_file_mark (void)
3325 {
3326 int ret;
3327
3328 async_file_flush ();
3329
3330 do
3331 ret = write (linux_event_pipe[1], "+", 1);
3332 while (ret == 0 || (ret == -1 && errno == EINTR));
3333
3334 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3335 be awakened anyway. */
3336 }
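
/* Illustrative sketch, not part of gdbserver: these two helpers are
   the write side of the classic self-pipe trick.  linux_event_pipe
   itself is set up elsewhere in this file when async mode is
   enabled; a minimal equivalent setup would be:

     int fds[2];

     if (pipe (fds) != 0)
       perror ("pipe");
     fcntl (fds[0], F_SETFL, O_NONBLOCK);
     fcntl (fds[1], F_SETFL, O_NONBLOCK);

   The event loop watches fds[0] for readability; async_file_mark
   writes one byte to fds[1] to wake it, and async_file_flush drains
   fds[0], with O_NONBLOCK guaranteeing neither end can hang. */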
3337
3338 static ptid_t
3339 linux_wait (ptid_t ptid,
3340 struct target_waitstatus *ourstatus, int target_options)
3341 {
3342 ptid_t event_ptid;
3343
3344 /* Flush the async file first. */
3345 if (target_is_async_p ())
3346 async_file_flush ();
3347
3348 do
3349 {
3350 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3351 }
3352 while ((target_options & TARGET_WNOHANG) == 0
3353 && ptid_equal (event_ptid, null_ptid)
3354 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3355
3356 /* If at least one stop was reported, there may be more. A single
3357 SIGCHLD can signal more than one child stop. */
3358 if (target_is_async_p ()
3359 && (target_options & TARGET_WNOHANG) != 0
3360 && !ptid_equal (event_ptid, null_ptid))
3361 async_file_mark ();
3362
3363 return event_ptid;
3364 }
3365
3366 /* Send a signal to an LWP. */
3367
3368 static int
3369 kill_lwp (unsigned long lwpid, int signo)
3370 {
3371 /* Use tkill, if possible, in case we are using NPTL threads. If tkill
3372 is unavailable (ENOSYS), we are not using NPTL threads and should use kill. */
3373
3374 #ifdef __NR_tkill
3375 {
3376 static int tkill_failed;
3377
3378 if (!tkill_failed)
3379 {
3380 int ret;
3381
3382 errno = 0;
3383 ret = syscall (__NR_tkill, lwpid, signo);
3384 if (errno != ENOSYS)
3385 return ret;
3386 tkill_failed = 1;
3387 }
3388 }
3389 #endif
3390
3391 return kill (lwpid, signo);
3392 }
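
/* Illustrative note, not part of gdbserver: tkill targets a single
   thread by kernel tid, while the kill fallback only works because
   on old LinuxThreads every thread had its own pid.  Newer kernels
   also provide tgkill, which adds the thread-group id as a guard
   against tid reuse:

     syscall (__NR_tgkill, tgid, tid, signo);

   where tgid would be pid_of (thread) in this file's terms. */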
3393
3394 void
3395 linux_stop_lwp (struct lwp_info *lwp)
3396 {
3397 send_sigstop (lwp);
3398 }
3399
3400 static void
3401 send_sigstop (struct lwp_info *lwp)
3402 {
3403 int pid;
3404
3405 pid = lwpid_of (get_lwp_thread (lwp));
3406
3407 /* If we already have a pending stop signal for this process, don't
3408 send another. */
3409 if (lwp->stop_expected)
3410 {
3411 if (debug_threads)
3412 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3413
3414 return;
3415 }
3416
3417 if (debug_threads)
3418 debug_printf ("Sending sigstop to lwp %d\n", pid);
3419
3420 lwp->stop_expected = 1;
3421 kill_lwp (pid, SIGSTOP);
3422 }
3423
3424 static int
3425 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3426 {
3427 struct thread_info *thread = (struct thread_info *) entry;
3428 struct lwp_info *lwp = get_thread_lwp (thread);
3429
3430 /* Ignore EXCEPT. */
3431 if (lwp == except)
3432 return 0;
3433
3434 if (lwp->stopped)
3435 return 0;
3436
3437 send_sigstop (lwp);
3438 return 0;
3439 }
3440
3441 /* Increment the suspend count of an LWP, and stop it, if not stopped
3442 yet. */
3443 static int
3444 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3445 void *except)
3446 {
3447 struct thread_info *thread = (struct thread_info *) entry;
3448 struct lwp_info *lwp = get_thread_lwp (thread);
3449
3450 /* Ignore EXCEPT. */
3451 if (lwp == except)
3452 return 0;
3453
3454 lwp->suspended++;
3455
3456 return send_sigstop_callback (entry, except);
3457 }
3458
3459 static void
3460 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3461 {
3462 /* It's dead, really. */
3463 lwp->dead = 1;
3464
3465 /* Store the exit status for later. */
3466 lwp->status_pending_p = 1;
3467 lwp->status_pending = wstat;
3468
3469 /* Prevent trying to stop it. */
3470 lwp->stopped = 1;
3471
3472 /* No further stops are expected from a dead lwp. */
3473 lwp->stop_expected = 0;
3474 }
3475
3476 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3477
3478 static void
3479 wait_for_sigstop (void)
3480 {
3481 struct thread_info *saved_thread;
3482 ptid_t saved_tid;
3483 int wstat;
3484 int ret;
3485
3486 saved_thread = current_thread;
3487 if (saved_thread != NULL)
3488 saved_tid = saved_thread->entry.id;
3489 else
3490 saved_tid = null_ptid; /* avoid bogus unused warning */
3491
3492 if (debug_threads)
3493 debug_printf ("wait_for_sigstop: pulling events\n");
3494
3495 /* Passing NULL_PTID as filter indicates we want all events to be
3496 left pending. Eventually this returns when there are no
3497 unwaited-for children left. */
3498 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3499 &wstat, __WALL);
3500 gdb_assert (ret == -1);
3501
3502 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3503 current_thread = saved_thread;
3504 else
3505 {
3506 if (debug_threads)
3507 debug_printf ("Previously current thread died.\n");
3508
3509 if (non_stop)
3510 {
3511 /* We can't change the current inferior behind GDB's back,
3512 otherwise, a subsequent command may apply to the wrong
3513 process. */
3514 current_thread = NULL;
3515 }
3516 else
3517 {
3518 /* Set a valid thread as current. */
3519 set_desired_thread (0);
3520 }
3521 }
3522 }
3523
3524 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3525 move it out, because we need to report the stop event to GDB. For
3526 example, if the user puts a breakpoint in the jump pad, it's
3527 because she wants to debug it. */
3528
3529 static int
3530 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3531 {
3532 struct thread_info *thread = (struct thread_info *) entry;
3533 struct lwp_info *lwp = get_thread_lwp (thread);
3534
3535 gdb_assert (lwp->suspended == 0);
3536 gdb_assert (lwp->stopped);
3537
3538 /* Allow debugging the jump pad, gdb_collect, etc. */
3539 return (supports_fast_tracepoints ()
3540 && agent_loaded_p ()
3541 && (gdb_breakpoint_here (lwp->stop_pc)
3542 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3543 || thread->last_resume_kind == resume_step)
3544 && linux_fast_tracepoint_collecting (lwp, NULL));
3545 }
3546
3547 static void
3548 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3549 {
3550 struct thread_info *thread = (struct thread_info *) entry;
3551 struct lwp_info *lwp = get_thread_lwp (thread);
3552 int *wstat;
3553
3554 gdb_assert (lwp->suspended == 0);
3555 gdb_assert (lwp->stopped);
3556
3557 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3558
3559 /* Allow debugging the jump pad, gdb_collect, etc. */
3560 if (!gdb_breakpoint_here (lwp->stop_pc)
3561 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3562 && thread->last_resume_kind != resume_step
3563 && maybe_move_out_of_jump_pad (lwp, wstat))
3564 {
3565 if (debug_threads)
3566 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3567 lwpid_of (thread));
3568
3569 if (wstat)
3570 {
3571 lwp->status_pending_p = 0;
3572 enqueue_one_deferred_signal (lwp, wstat);
3573
3574 if (debug_threads)
3575 debug_printf ("Signal %d for LWP %ld deferred "
3576 "(in jump pad)\n",
3577 WSTOPSIG (*wstat), lwpid_of (thread));
3578 }
3579
3580 linux_resume_one_lwp (lwp, 0, 0, NULL);
3581 }
3582 else
3583 lwp->suspended++;
3584 }
3585
3586 static int
3587 lwp_running (struct inferior_list_entry *entry, void *data)
3588 {
3589 struct thread_info *thread = (struct thread_info *) entry;
3590 struct lwp_info *lwp = get_thread_lwp (thread);
3591
3592 if (lwp->dead)
3593 return 0;
3594 if (lwp->stopped)
3595 return 0;
3596 return 1;
3597 }
3598
3599 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3600 If SUSPEND, then also increase the suspend count of every LWP,
3601 except EXCEPT. */
3602
3603 static void
3604 stop_all_lwps (int suspend, struct lwp_info *except)
3605 {
3606 /* Should not be called recursively. */
3607 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3608
3609 if (debug_threads)
3610 {
3611 debug_enter ();
3612 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3613 suspend ? "stop-and-suspend" : "stop",
3614 except != NULL
3615 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3616 : "none");
3617 }
3618
3619 stopping_threads = (suspend
3620 ? STOPPING_AND_SUSPENDING_THREADS
3621 : STOPPING_THREADS);
3622
3623 if (suspend)
3624 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3625 else
3626 find_inferior (&all_threads, send_sigstop_callback, except);
3627 wait_for_sigstop ();
3628 stopping_threads = NOT_STOPPING_THREADS;
3629
3630 if (debug_threads)
3631 {
3632 debug_printf ("stop_all_lwps done, setting stopping_threads "
3633 "back to !stopping\n");
3634 debug_exit ();
3635 }
3636 }
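
/* Illustrative note, not part of gdbserver: with SUSPEND set, this
   acts as a counted lock.  Every stop_all_lwps (1, x) must be paired
   with either unstop_all_lwps (1, x) or unsuspend_all_lwps (x) so
   each LWP's `suspended' count returns to zero, which the
   gdb_assert (lwp->suspended >= 0) checks above enforce.  The
   canonical pairing, as used when deleting a breakpoint that other
   threads might be running through:

     stop_all_lwps (1, event_child);
     delete_breakpoint (event_child->exit_jump_pad_bkpt);
     unstop_all_lwps (1, event_child);
*/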
3637
3638 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3639 SIGNAL is nonzero, give it that signal. */
3640
3641 static void
3642 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3643 int step, int signal, siginfo_t *info)
3644 {
3645 struct thread_info *thread = get_lwp_thread (lwp);
3646 struct thread_info *saved_thread;
3647 int fast_tp_collecting;
3648
3649 if (lwp->stopped == 0)
3650 return;
3651
3652 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3653
3654 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3655
3656 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3657 user used the "jump" command, or "set $pc = foo"). */
3658 if (lwp->stop_pc != get_pc (lwp))
3659 {
3660 /* Collecting 'while-stepping' actions doesn't make sense
3661 anymore. */
3662 release_while_stepping_state_list (thread);
3663 }
3664
3665 /* If we have pending signals or status, and a new signal, enqueue the
3666 signal. Also enqueue the signal if we are waiting to reinsert a
3667 breakpoint; it will be picked up again below. */
3668 if (signal != 0
3669 && (lwp->status_pending_p
3670 || lwp->pending_signals != NULL
3671 || lwp->bp_reinsert != 0
3672 || fast_tp_collecting))
3673 {
3674 struct pending_signals *p_sig;
3675 p_sig = xmalloc (sizeof (*p_sig));
3676 p_sig->prev = lwp->pending_signals;
3677 p_sig->signal = signal;
3678 if (info == NULL)
3679 memset (&p_sig->info, 0, sizeof (siginfo_t));
3680 else
3681 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3682 lwp->pending_signals = p_sig;
3683 }
3684
3685 if (lwp->status_pending_p)
3686 {
3687 if (debug_threads)
3688 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3689 " has pending status\n",
3690 lwpid_of (thread), step ? "step" : "continue", signal,
3691 lwp->stop_expected ? "expected" : "not expected");
3692 return;
3693 }
3694
3695 saved_thread = current_thread;
3696 current_thread = thread;
3697
3698 if (debug_threads)
3699 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3700 lwpid_of (thread), step ? "step" : "continue", signal,
3701 lwp->stop_expected ? "expected" : "not expected");
3702
3703 /* This bit needs some thinking about. If we get a signal that
3704 we must report while a single-step reinsert is still pending,
3705 we often end up resuming the thread. It might be better to
3706 (ew) allow a stack of pending events; then we could be sure that
3707 the reinsert happened right away and not lose any signals.
3708
3709 Making this stack would also shrink the window in which breakpoints are
3710 uninserted (see comment in linux_wait_for_lwp) but not enough for
3711 complete correctness, so it won't solve that problem. It may be
3712 worthwhile just to solve this one, however. */
3713 if (lwp->bp_reinsert != 0)
3714 {
3715 if (debug_threads)
3716 debug_printf (" pending reinsert at 0x%s\n",
3717 paddress (lwp->bp_reinsert));
3718
3719 if (can_hardware_single_step ())
3720 {
3721 if (fast_tp_collecting == 0)
3722 {
3723 if (step == 0)
3724 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3725 if (lwp->suspended)
3726 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3727 lwp->suspended);
3728 }
3729
3730 step = 1;
3731 }
3732
3733 /* Postpone any pending signal. It was enqueued above. */
3734 signal = 0;
3735 }
3736
3737 if (fast_tp_collecting == 1)
3738 {
3739 if (debug_threads)
3740 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3741 " (exit-jump-pad-bkpt)\n",
3742 lwpid_of (thread));
3743
3744 /* Postpone any pending signal. It was enqueued above. */
3745 signal = 0;
3746 }
3747 else if (fast_tp_collecting == 2)
3748 {
3749 if (debug_threads)
3750 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3751 " single-stepping\n",
3752 lwpid_of (thread));
3753
3754 if (can_hardware_single_step ())
3755 step = 1;
3756 else
3757 {
3758 internal_error (__FILE__, __LINE__,
3759 "moving out of jump pad single-stepping"
3760 " not implemented on this target");
3761 }
3762
3763 /* Postpone any pending signal. It was enqueued above. */
3764 signal = 0;
3765 }
3766
3767 /* If we have while-stepping actions in this thread, set it stepping.
3768 If we have a signal to deliver, it may or may not be set to
3769 SIG_IGN, we don't know. Assume so, and allow collecting
3770 while-stepping into a signal handler. A possible smart thing to
3771 do would be to set an internal breakpoint at the signal return
3772 address, continue, and carry on catching this while-stepping
3773 action only when that breakpoint is hit. A future
3774 enhancement. */
3775 if (thread->while_stepping != NULL
3776 && can_hardware_single_step ())
3777 {
3778 if (debug_threads)
3779 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3780 lwpid_of (thread));
3781 step = 1;
3782 }
3783
3784 if (the_low_target.get_pc != NULL)
3785 {
3786 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3787
3788 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3789
3790 if (debug_threads)
3791 {
3792 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3793 (long) lwp->stop_pc);
3794 }
3795 }
3796
3797 /* If we have pending signals, consume one unless we are trying to
3798 reinsert a breakpoint or we're trying to finish a fast tracepoint
3799 collect. */
3800 if (lwp->pending_signals != NULL
3801 && lwp->bp_reinsert == 0
3802 && fast_tp_collecting == 0)
3803 {
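      /* For reference: the pending list is prepended on enqueue
         (newest at the head, PREV pointing at older entries) and
         consumed from the tail here, so signals are delivered in
         FIFO order.  */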
3804 struct pending_signals **p_sig;
3805
3806 p_sig = &lwp->pending_signals;
3807 while ((*p_sig)->prev != NULL)
3808 p_sig = &(*p_sig)->prev;
3809
3810 signal = (*p_sig)->signal;
3811 if ((*p_sig)->info.si_signo != 0)
3812 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3813 &(*p_sig)->info);
3814
3815 free (*p_sig);
3816 *p_sig = NULL;
3817 }
3818
3819 if (the_low_target.prepare_to_resume != NULL)
3820 the_low_target.prepare_to_resume (lwp);
3821
3822 regcache_invalidate_thread (thread);
3823 errno = 0;
3824 lwp->stepping = step;
3825 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3826 (PTRACE_TYPE_ARG3) 0,
3827 /* Coerce to a uintptr_t first to avoid potential gcc warning
3828 of coercing an 8 byte integer to a 4 byte pointer. */
3829 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3830
3831 current_thread = saved_thread;
3832 if (errno)
3833 perror_with_name ("resuming thread");
3834
3835 /* Successfully resumed. Clear state that no longer makes sense,
3836 and mark the LWP as running. Must not do this before resuming
3837 otherwise if that fails other code will be confused. E.g., we'd
3838 later try to stop the LWP and hang forever waiting for a stop
3839 status. Note that we must not throw after this is cleared,
3840 otherwise handle_zombie_lwp_error would get confused. */
3841 lwp->stopped = 0;
3842 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3843 }
3844
3845 /* Called when we try to resume a stopped LWP and that errors out. If
3846 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3847 or about to become), discard the error, clear any pending status
3848 the LWP may have, and return true (we'll collect the exit status
3849 soon enough). Otherwise, return false. */
3850
3851 static int
3852 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3853 {
3854 struct thread_info *thread = get_lwp_thread (lp);
3855
3856 /* If we get an error after resuming the LWP successfully, we'd
3857 confuse !T state for the LWP being gone. */
3858 gdb_assert (lp->stopped);
3859
3860 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3861 because even if ptrace failed with ESRCH, the tracee may be "not
3862 yet fully dead", but already refusing ptrace requests. In that
3863 case the tracee has 'R (Running)' state for a little bit
3864 (observed in Linux 3.18). See also the note on ESRCH in the
3865 ptrace(2) man page. Instead, check whether the LWP has any state
3866 other than ptrace-stopped. */
3867
3868 /* Don't assume anything if /proc/PID/status can't be read. */
3869 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3870 {
3871 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3872 lp->status_pending_p = 0;
3873 return 1;
3874 }
3875 return 0;
3876 }
3877
3878 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3879 disappears while we try to resume it. */
3880
3881 static void
3882 linux_resume_one_lwp (struct lwp_info *lwp,
3883 int step, int signal, siginfo_t *info)
3884 {
3885 TRY
3886 {
3887 linux_resume_one_lwp_throw (lwp, step, signal, info);
3888 }
3889 CATCH (ex, RETURN_MASK_ERROR)
3890 {
3891 if (!check_ptrace_stopped_lwp_gone (lwp))
3892 throw_exception (ex);
3893 }
3894 END_CATCH
3895 }
3896
3897 struct thread_resume_array
3898 {
3899 struct thread_resume *resume;
3900 size_t n;
3901 };
3902
3903 /* This function is called once per thread via find_inferior.
3904 ARG is a pointer to a thread_resume_array struct.
3905 We look up the thread specified by ENTRY in ARG, and mark the thread
3906 with a pointer to the appropriate resume request.
3907
3908 This algorithm is O(threads * resume elements), but resume elements
3909 is small (and will remain small at least until GDB supports thread
3910 suspension). */
3911
3912 static int
3913 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3914 {
3915 struct thread_info *thread = (struct thread_info *) entry;
3916 struct lwp_info *lwp = get_thread_lwp (thread);
3917 int ndx;
3918 struct thread_resume_array *r;
3919
3920 r = arg;
3921
3922 for (ndx = 0; ndx < r->n; ndx++)
3923 {
3924 ptid_t ptid = r->resume[ndx].thread;
3925 if (ptid_equal (ptid, minus_one_ptid)
3926 || ptid_equal (ptid, entry->id)
3927 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3928 of PID'. */
3929 || (ptid_get_pid (ptid) == pid_of (thread)
3930 && (ptid_is_pid (ptid)
3931 || ptid_get_lwp (ptid) == -1)))
3932 {
3933 if (r->resume[ndx].kind == resume_stop
3934 && thread->last_resume_kind == resume_stop)
3935 {
3936 if (debug_threads)
3937 debug_printf ("already %s LWP %ld at GDB's request\n",
3938 (thread->last_status.kind
3939 == TARGET_WAITKIND_STOPPED)
3940 ? "stopped"
3941 : "stopping",
3942 lwpid_of (thread));
3943
3944 continue;
3945 }
3946
3947 lwp->resume = &r->resume[ndx];
3948 thread->last_resume_kind = lwp->resume->kind;
3949
3950 lwp->step_range_start = lwp->resume->step_range_start;
3951 lwp->step_range_end = lwp->resume->step_range_end;
3952
3953 /* If we had a deferred signal to report, dequeue one now.
3954 This can happen if the LWP gets more than one signal while
3955 trying to get out of a jump pad. */
3956 if (lwp->stopped
3957 && !lwp->status_pending_p
3958 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3959 {
3960 lwp->status_pending_p = 1;
3961
3962 if (debug_threads)
3963 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3964 "leaving status pending.\n",
3965 WSTOPSIG (lwp->status_pending),
3966 lwpid_of (thread));
3967 }
3968
3969 return 0;
3970 }
3971 }
3972
3973 /* No resume action for this thread. */
3974 lwp->resume = NULL;
3975
3976 return 0;
3977 }
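/* For reference, the matching above accepts, for a thread p123.456:
   minus_one_ptid (all threads), p123.456 (exact match), p123 (all
   threads of process 123), and p123.-1 (likewise all threads of
   process 123).  */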
3978
3979 /* find_inferior callback for linux_resume.
3980 Set *FLAG_P if this lwp has an interesting status pending. */
3981
3982 static int
3983 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3984 {
3985 struct thread_info *thread = (struct thread_info *) entry;
3986 struct lwp_info *lwp = get_thread_lwp (thread);
3987
3988 /* LWPs which will not be resumed are not interesting, because
3989 we might not wait for them next time through linux_wait. */
3990 if (lwp->resume == NULL)
3991 return 0;
3992
3993 if (thread_still_has_status_pending_p (thread))
3994 * (int *) flag_p = 1;
3995
3996 return 0;
3997 }
3998
3999 /* Return 1 if this lwp that GDB wants running is stopped at an
4000 internal breakpoint that we need to step over. It assumes that any
4001 required STOP_PC adjustment has already been propagated to the
4002 inferior's regcache. */
4003
4004 static int
4005 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4006 {
4007 struct thread_info *thread = (struct thread_info *) entry;
4008 struct lwp_info *lwp = get_thread_lwp (thread);
4009 struct thread_info *saved_thread;
4010 CORE_ADDR pc;
4011
4012 /* LWPs which will not be resumed are not interesting, because we
4013 might not wait for them next time through linux_wait. */
4014
4015 if (!lwp->stopped)
4016 {
4017 if (debug_threads)
4018 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4019 lwpid_of (thread));
4020 return 0;
4021 }
4022
4023 if (thread->last_resume_kind == resume_stop)
4024 {
4025 if (debug_threads)
4026 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4027 " stopped\n",
4028 lwpid_of (thread));
4029 return 0;
4030 }
4031
4032 gdb_assert (lwp->suspended >= 0);
4033
4034 if (lwp->suspended)
4035 {
4036 if (debug_threads)
4037 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4038 lwpid_of (thread));
4039 return 0;
4040 }
4041
4042 if (!lwp->need_step_over)
4043 {
4044 if (debug_threads)
4045 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4046 }
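  /* Note: no early return here; even without the hint we fall
     through to the breakpoint checks below, which make the final
     decision.  */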
4047
4048 if (lwp->status_pending_p)
4049 {
4050 if (debug_threads)
4051 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4052 " status.\n",
4053 lwpid_of (thread));
4054 return 0;
4055 }
4056
4057 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4058 or we have. */
4059 pc = get_pc (lwp);
4060
4061 /* If the PC has changed since we stopped, then don't do anything,
4062 and let the breakpoint/tracepoint be hit. This happens if, for
4063 instance, GDB handled the decr_pc_after_break subtraction itself,
4064 GDB is OOL stepping this thread, or the user has issued a "jump"
4065 command, or poked the thread's registers herself. */
4066 if (pc != lwp->stop_pc)
4067 {
4068 if (debug_threads)
4069 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4070 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4071 lwpid_of (thread),
4072 paddress (lwp->stop_pc), paddress (pc));
4073
4074 lwp->need_step_over = 0;
4075 return 0;
4076 }
4077
4078 saved_thread = current_thread;
4079 current_thread = thread;
4080
4081 /* We can only step over breakpoints we know about. */
4082 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4083 {
4084 /* Don't step over a breakpoint that GDB expects to hit
4085 though. If the condition is being evaluated on the target's side
4086 and it evaluates to false, step over this breakpoint as well. */
4087 if (gdb_breakpoint_here (pc)
4088 && gdb_condition_true_at_breakpoint (pc)
4089 && gdb_no_commands_at_breakpoint (pc))
4090 {
4091 if (debug_threads)
4092 debug_printf ("Need step over [LWP %ld]? yes, but found"
4093 " GDB breakpoint at 0x%s; skipping step over\n",
4094 lwpid_of (thread), paddress (pc));
4095
4096 current_thread = saved_thread;
4097 return 0;
4098 }
4099 else
4100 {
4101 if (debug_threads)
4102 debug_printf ("Need step over [LWP %ld]? yes, "
4103 "found breakpoint at 0x%s\n",
4104 lwpid_of (thread), paddress (pc));
4105
4106 /* We've found an lwp that needs stepping over --- return 1 so
4107 that find_inferior stops looking. */
4108 current_thread = saved_thread;
4109
4110 /* If the step over is cancelled, this is set again. */
4111 lwp->need_step_over = 0;
4112 return 1;
4113 }
4114 }
4115
4116 current_thread = saved_thread;
4117
4118 if (debug_threads)
4119 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4120 " at 0x%s\n",
4121 lwpid_of (thread), paddress (pc));
4122
4123 return 0;
4124 }
4125
4126 /* Start a step-over operation on LWP. When LWP stopped at a
4127 breakpoint, to make progress, we need to move the breakpoint out
4128 of the way. If we let other threads run while we do that, they may
4129 pass by the breakpoint location and miss hitting it. To avoid
4130 that, a step-over momentarily stops all threads while LWP is
4131 single-stepped while the breakpoint is temporarily uninserted from
4132 the inferior. When the single-step finishes, we reinsert the
4133 breakpoint, and let all threads that are supposed to be running,
4134 run again.
4135
4136 On targets that don't support hardware single-step, we don't
4137 currently support full software single-stepping. Instead, we only
4138 support stepping over the thread event breakpoint, by asking the
4139 low target where to place a reinsert breakpoint. Since this
4140 routine assumes the breakpoint being stepped over is a thread event
4141 breakpoint, it usually takes the return address of the current
4142 function as a good enough place to set the reinsert breakpoint. */
4143
4144 static int
4145 start_step_over (struct lwp_info *lwp)
4146 {
4147 struct thread_info *thread = get_lwp_thread (lwp);
4148 struct thread_info *saved_thread;
4149 CORE_ADDR pc;
4150 int step;
4151
4152 if (debug_threads)
4153 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4154 lwpid_of (thread));
4155
4156 stop_all_lwps (1, lwp);
4157 gdb_assert (lwp->suspended == 0);
4158
4159 if (debug_threads)
4160 debug_printf ("Done stopping all threads for step-over.\n");
4161
4162 /* Note, we should always reach here with an already adjusted PC,
4163 either by GDB (if we're resuming due to GDB's request), or by our
4164 caller, if we just finished handling an internal breakpoint GDB
4165 shouldn't care about. */
4166 pc = get_pc (lwp);
4167
4168 saved_thread = current_thread;
4169 current_thread = thread;
4170
4171 lwp->bp_reinsert = pc;
4172 uninsert_breakpoints_at (pc);
4173 uninsert_fast_tracepoint_jumps_at (pc);
4174
4175 if (can_hardware_single_step ())
4176 {
4177 step = 1;
4178 }
4179 else
4180 {
4181 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4182 set_reinsert_breakpoint (raddr);
4183 step = 0;
4184 }
4185
4186 current_thread = saved_thread;
4187
4188 linux_resume_one_lwp (lwp, step, 0, NULL);
4189
4190 /* Require next event from this LWP. */
4191 step_over_bkpt = thread->entry.id;
4192 return 1;
4193 }
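/* Step-over life cycle, for reference:

     start_step_over:   stop_all_lwps (1, lwp); uninsert the
                        breakpoint at PC; hardware single-step, or
                        plant a reinsert breakpoint where the low
                        target says; step_over_bkpt = LWP's ptid.
     finish_step_over:  once that LWP reports its event, reinsert the
                        breakpoint, delete any reinsert breakpoints,
                        and clear step_over_bkpt.
     proceed_all_lwps:  set the remaining threads running again.  */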
4194
4195 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4196 start_step_over, if still there, and delete any reinsert
4197 breakpoints we've set, on non hardware single-step targets. */
4198
4199 static int
4200 finish_step_over (struct lwp_info *lwp)
4201 {
4202 if (lwp->bp_reinsert != 0)
4203 {
4204 if (debug_threads)
4205 debug_printf ("Finished step over.\n");
4206
4207 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4208 may be no breakpoint to reinsert there by now. */
4209 reinsert_breakpoints_at (lwp->bp_reinsert);
4210 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4211
4212 lwp->bp_reinsert = 0;
4213
4214 /* Delete any software-single-step reinsert breakpoints. No
4215 longer needed. We don't have to worry about other threads
4216 hitting this trap, and later not being able to explain it,
4217 because we were stepping over a breakpoint, and we hold all
4218 threads but LWP stopped while doing that. */
4219 if (!can_hardware_single_step ())
4220 delete_reinsert_breakpoints ();
4221
4222 step_over_bkpt = null_ptid;
4223 return 1;
4224 }
4225 else
4226 return 0;
4227 }
4228
4229 /* This function is called once per thread. We check the thread's resume
4230 request, which will tell us whether to resume, step, or leave the thread
4231 stopped; and what signal, if any, it should be sent.
4232
4233 For threads which we aren't explicitly told otherwise, we preserve
4234 the stepping flag; this is used for stepping over gdbserver-placed
4235 breakpoints.
4236
4237 If pending_flags was set in any thread, we queue any needed
4238 signals, since we won't actually resume. We already have a pending
4239 event to report, so we don't need to preserve any step requests;
4240 they should be re-issued if necessary. */
4241
4242 static int
4243 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4244 {
4245 struct thread_info *thread = (struct thread_info *) entry;
4246 struct lwp_info *lwp = get_thread_lwp (thread);
4247 int step;
4248 int leave_all_stopped = * (int *) arg;
4249 int leave_pending;
4250
4251 if (lwp->resume == NULL)
4252 return 0;
4253
4254 if (lwp->resume->kind == resume_stop)
4255 {
4256 if (debug_threads)
4257 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4258
4259 if (!lwp->stopped)
4260 {
4261 if (debug_threads)
4262 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4263
4264 /* Stop the thread, and wait for the event asynchronously,
4265 through the event loop. */
4266 send_sigstop (lwp);
4267 }
4268 else
4269 {
4270 if (debug_threads)
4271 debug_printf ("already stopped LWP %ld\n",
4272 lwpid_of (thread));
4273
4274 /* The LWP may have been stopped in an internal event that
4275 was not meant to be notified back to GDB (e.g., gdbserver
4276 breakpoint), so we should be reporting a stop event in
4277 this case too. */
4278
4279 /* If the thread already has a pending SIGSTOP, this is a
4280 no-op. Otherwise, something later will presumably resume
4281 the thread and this will cause it to cancel any pending
4282 operation, due to last_resume_kind == resume_stop. If
4283 the thread already has a pending status to report, we
4284 will still report it the next time we wait - see
4285 status_pending_p_callback. */
4286
4287 /* If we already have a pending signal to report, then
4288 there's no need to queue a SIGSTOP, as this means we're
4289 midway through moving the LWP out of the jumppad, and we
4290 will report the pending signal as soon as that is
4291 finished. */
4292 if (lwp->pending_signals_to_report == NULL)
4293 send_sigstop (lwp);
4294 }
4295
4296 /* For stop requests, we're done. */
4297 lwp->resume = NULL;
4298 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4299 return 0;
4300 }
4301
4302 /* If this thread, which is about to be resumed, has a pending status,
4303 then don't resume any threads - we can just report the pending
4304 status. Make sure to queue any signals that would otherwise be
4305 sent. In all-stop mode, we base this decision on whether *any*
4306 thread has a pending status. If there's a thread that needs the
4307 step-over-breakpoint dance, then don't resume any other thread
4308 but that particular one. */
4309 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4310
4311 if (!leave_pending)
4312 {
4313 if (debug_threads)
4314 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4315
4316 step = (lwp->resume->kind == resume_step);
4317 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4318 }
4319 else
4320 {
4321 if (debug_threads)
4322 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4323
4324 /* If we have a new signal, enqueue the signal. */
4325 if (lwp->resume->sig != 0)
4326 {
4327 struct pending_signals *p_sig;
4328 p_sig = xmalloc (sizeof (*p_sig));
4329 p_sig->prev = lwp->pending_signals;
4330 p_sig->signal = lwp->resume->sig;
4331 memset (&p_sig->info, 0, sizeof (siginfo_t));
4332
4333 /* If this is the same signal we were previously stopped by,
4334 make sure to queue its siginfo. We can ignore the return
4335 value of ptrace; if it fails, we'll skip
4336 PTRACE_SETSIGINFO. */
4337 if (WIFSTOPPED (lwp->last_status)
4338 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4339 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4340 &p_sig->info);
4341
4342 lwp->pending_signals = p_sig;
4343 }
4344 }
4345
4346 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4347 lwp->resume = NULL;
4348 return 0;
4349 }
4350
4351 static void
4352 linux_resume (struct thread_resume *resume_info, size_t n)
4353 {
4354 struct thread_resume_array array = { resume_info, n };
4355 struct thread_info *need_step_over = NULL;
4356 int any_pending;
4357 int leave_all_stopped;
4358
4359 if (debug_threads)
4360 {
4361 debug_enter ();
4362 debug_printf ("linux_resume:\n");
4363 }
4364
4365 find_inferior (&all_threads, linux_set_resume_request, &array);
4366
4367 /* If there is a thread which would otherwise be resumed, which has
4368 a pending status, then don't resume any threads - we can just
4369 report the pending status. Make sure to queue any signals that
4370 would otherwise be sent. In non-stop mode, we'll apply this
4371 logic to each thread individually. We consume all pending events
4372 before considering to start a step-over (in all-stop). */
4373 any_pending = 0;
4374 if (!non_stop)
4375 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4376
4377 /* If there is a thread which would otherwise be resumed, which is
4378 stopped at a breakpoint that needs stepping over, then don't
4379 resume any threads - have it step over the breakpoint with all
4380 other threads stopped, then resume all threads again. Make sure
4381 to queue any signals that would otherwise be delivered or
4382 queued. */
4383 if (!any_pending && supports_breakpoints ())
4384 need_step_over
4385 = (struct thread_info *) find_inferior (&all_threads,
4386 need_step_over_p, NULL);
4387
4388 leave_all_stopped = (need_step_over != NULL || any_pending);
4389
4390 if (debug_threads)
4391 {
4392 if (need_step_over != NULL)
4393 debug_printf ("Not resuming all, need step over\n");
4394 else if (any_pending)
4395 debug_printf ("Not resuming, all-stop and found "
4396 "an LWP with pending status\n");
4397 else
4398 debug_printf ("Resuming, no pending status or step over needed\n");
4399 }
4400
4401 /* Even if we're leaving threads stopped, queue all signals we'd
4402 otherwise deliver. */
4403 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4404
4405 if (need_step_over)
4406 start_step_over (get_thread_lwp (need_step_over));
4407
4408 if (debug_threads)
4409 {
4410 debug_printf ("linux_resume done\n");
4411 debug_exit ();
4412 }
4413 }
4414
4415 /* This function is called once per thread. We check the thread's
4416 last resume request, which will tell us whether to resume, step, or
4417 leave the thread stopped. Any signal the client requested to be
4418 delivered has already been enqueued at this point.
4419
4420 If any thread that GDB wants running is stopped at an internal
4421 breakpoint that needs stepping over, we start a step-over operation
4422 on that particular thread, and leave all others stopped. */
4423
4424 static int
4425 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4426 {
4427 struct thread_info *thread = (struct thread_info *) entry;
4428 struct lwp_info *lwp = get_thread_lwp (thread);
4429 int step;
4430
4431 if (lwp == except)
4432 return 0;
4433
4434 if (debug_threads)
4435 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4436
4437 if (!lwp->stopped)
4438 {
4439 if (debug_threads)
4440 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4441 return 0;
4442 }
4443
4444 if (thread->last_resume_kind == resume_stop
4445 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4446 {
4447 if (debug_threads)
4448 debug_printf (" client wants LWP %ld to remain stopped\n",
4449 lwpid_of (thread));
4450 return 0;
4451 }
4452
4453 if (lwp->status_pending_p)
4454 {
4455 if (debug_threads)
4456 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4457 lwpid_of (thread));
4458 return 0;
4459 }
4460
4461 gdb_assert (lwp->suspended >= 0);
4462
4463 if (lwp->suspended)
4464 {
4465 if (debug_threads)
4466 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4467 return 0;
4468 }
4469
4470 if (thread->last_resume_kind == resume_stop
4471 && lwp->pending_signals_to_report == NULL
4472 && lwp->collecting_fast_tracepoint == 0)
4473 {
4474 /* We haven't reported this LWP as stopped yet (otherwise, the
4475 last_status.kind check above would catch it, and we wouldn't
4476 reach here).  This LWP may have been momentarily paused by a
4477 stop_all_lwps call while handling, for example, another LWP's
4478 step-over. In that case, the pending expected SIGSTOP signal
4479 that was queued at vCont;t handling time will have already
4480 been consumed by wait_for_sigstop, and so we need to requeue
4481 another one here. Note that if the LWP already has a SIGSTOP
4482 pending, this is a no-op. */
4483
4484 if (debug_threads)
4485 debug_printf ("Client wants LWP %ld to stop. "
4486 "Making sure it has a SIGSTOP pending\n",
4487 lwpid_of (thread));
4488
4489 send_sigstop (lwp);
4490 }
4491
4492 step = thread->last_resume_kind == resume_step;
4493 linux_resume_one_lwp (lwp, step, 0, NULL);
4494 return 0;
4495 }
4496
4497 static int
4498 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4499 {
4500 struct thread_info *thread = (struct thread_info *) entry;
4501 struct lwp_info *lwp = get_thread_lwp (thread);
4502
4503 if (lwp == except)
4504 return 0;
4505
4506 lwp->suspended--;
4507 gdb_assert (lwp->suspended >= 0);
4508
4509 return proceed_one_lwp (entry, except);
4510 }
4511
4512 /* When we finish a step-over, set threads running again. If there's
4513 another thread that may need a step-over, now's the time to start
4514 it. Eventually, we'll move all threads past their breakpoints. */
4515
4516 static void
4517 proceed_all_lwps (void)
4518 {
4519 struct thread_info *need_step_over;
4520
4521 /* If there is a thread which would otherwise be resumed, which is
4522 stopped at a breakpoint that needs stepping over, then don't
4523 resume any threads - have it step over the breakpoint with all
4524 other threads stopped, then resume all threads again. */
4525
4526 if (supports_breakpoints ())
4527 {
4528 need_step_over
4529 = (struct thread_info *) find_inferior (&all_threads,
4530 need_step_over_p, NULL);
4531
4532 if (need_step_over != NULL)
4533 {
4534 if (debug_threads)
4535 debug_printf ("proceed_all_lwps: found "
4536 "thread %ld needing a step-over\n",
4537 lwpid_of (need_step_over));
4538
4539 start_step_over (get_thread_lwp (need_step_over));
4540 return;
4541 }
4542 }
4543
4544 if (debug_threads)
4545 debug_printf ("Proceeding, no step-over needed\n");
4546
4547 find_inferior (&all_threads, proceed_one_lwp, NULL);
4548 }
4549
4550 /* Stopped LWPs that the client wanted to be running, that don't have
4551 pending statuses, are set to run again, except for EXCEPT, if not
4552 NULL. This undoes a stop_all_lwps call. */
4553
4554 static void
4555 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4556 {
4557 if (debug_threads)
4558 {
4559 debug_enter ();
4560 if (except)
4561 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4562 lwpid_of (get_lwp_thread (except)));
4563 else
4564 debug_printf ("unstopping all lwps\n");
4565 }
4566
4567 if (unsuspend)
4568 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4569 else
4570 find_inferior (&all_threads, proceed_one_lwp, except);
4571
4572 if (debug_threads)
4573 {
4574 debug_printf ("unstop_all_lwps done\n");
4575 debug_exit ();
4576 }
4577 }
4578
4579
4580 #ifdef HAVE_LINUX_REGSETS
4581
4582 #define use_linux_regsets 1
4583
4584 /* Returns true if REGSET has been disabled. */
4585
4586 static int
4587 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4588 {
4589 return (info->disabled_regsets != NULL
4590 && info->disabled_regsets[regset - info->regsets]);
4591 }
4592
4593 /* Disable REGSET. */
4594
4595 static void
4596 disable_regset (struct regsets_info *info, struct regset_info *regset)
4597 {
4598 int dr_offset;
4599
4600 dr_offset = regset - info->regsets;
4601 if (info->disabled_regsets == NULL)
4602 info->disabled_regsets = xcalloc (1, info->num_regsets);
4603 info->disabled_regsets[dr_offset] = 1;
4604 }
4605
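/* Fetch the current thread's registers using the regset ptrace
   requests described by REGSETS_INFO, storing them into REGCACHE.
   Returns 0 if a general-purpose regset was transferred (callers
   then need the usr interface only for registers outside every
   regset), and 1 otherwise.  */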
4606 static int
4607 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4608 struct regcache *regcache)
4609 {
4610 struct regset_info *regset;
4611 int saw_general_regs = 0;
4612 int pid;
4613 struct iovec iov;
4614
4615 pid = lwpid_of (current_thread);
4616 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4617 {
4618 void *buf, *data;
4619 int nt_type, res;
4620
4621 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4622 continue;
4623
4624 buf = xmalloc (regset->size);
4625
4626 nt_type = regset->nt_type;
4627 if (nt_type)
4628 {
4629 iov.iov_base = buf;
4630 iov.iov_len = regset->size;
4631 data = (void *) &iov;
4632 }
4633 else
4634 data = buf;
4635
4636 #ifndef __sparc__
4637 res = ptrace (regset->get_request, pid,
4638 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4639 #else
4640 res = ptrace (regset->get_request, pid, data, nt_type);
4641 #endif
4642 if (res < 0)
4643 {
4644 if (errno == EIO)
4645 {
4646 /* If we get EIO on a regset, do not try it again for
4647 this process mode. */
4648 disable_regset (regsets_info, regset);
4649 }
4650 else if (errno == ENODATA)
4651 {
4652 /* ENODATA may be returned if the regset is currently
4653 not "active". This can happen in normal operation,
4654 so suppress the warning in this case. */
4655 }
4656 else
4657 {
4658 char s[256];
4659 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4660 pid);
4661 perror (s);
4662 }
4663 }
4664 else
4665 {
4666 if (regset->type == GENERAL_REGS)
4667 saw_general_regs = 1;
4668 regset->store_function (regcache, buf);
4669 }
4670 free (buf);
4671 }
4672 if (saw_general_regs)
4673 return 0;
4674 else
4675 return 1;
4676 }
4677
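/* Counterpart of regsets_fetch_inferior_registers: write REGCACHE
   back out through the regset ptrace requests.  Each regset is first
   read, overlaid with the cached values, and only then written, so
   kernel-side bits that the regcache does not track are preserved.
   The return value convention is the same as for fetching.  */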
4678 static int
4679 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4680 struct regcache *regcache)
4681 {
4682 struct regset_info *regset;
4683 int saw_general_regs = 0;
4684 int pid;
4685 struct iovec iov;
4686
4687 pid = lwpid_of (current_thread);
4688 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4689 {
4690 void *buf, *data;
4691 int nt_type, res;
4692
4693 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4694 || regset->fill_function == NULL)
4695 continue;
4696
4697 buf = xmalloc (regset->size);
4698
4699 /* First fill the buffer with the current register set contents,
4700 in case there are any items in the kernel's regset that are
4701 not in gdbserver's regcache. */
4702
4703 nt_type = regset->nt_type;
4704 if (nt_type)
4705 {
4706 iov.iov_base = buf;
4707 iov.iov_len = regset->size;
4708 data = (void *) &iov;
4709 }
4710 else
4711 data = buf;
4712
4713 #ifndef __sparc__
4714 res = ptrace (regset->get_request, pid,
4715 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4716 #else
4717 res = ptrace (regset->get_request, pid, data, nt_type);
4718 #endif
4719
4720 if (res == 0)
4721 {
4722 /* Then overlay our cached registers on that. */
4723 regset->fill_function (regcache, buf);
4724
4725 /* Only now do we write the register set. */
4726 #ifndef __sparc__
4727 res = ptrace (regset->set_request, pid,
4728 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4729 #else
4730 res = ptrace (regset->set_request, pid, data, nt_type);
4731 #endif
4732 }
4733
4734 if (res < 0)
4735 {
4736 if (errno == EIO)
4737 {
4738 /* If we get EIO on a regset, do not try it again for
4739 this process mode. */
4740 disable_regset (regsets_info, regset);
4741 }
4742 else if (errno == ESRCH)
4743 {
4744 /* At this point, ESRCH should mean the process is
4745 already gone, in which case we simply ignore attempts
4746 to change its registers. See also the related
4747 comment in linux_resume_one_lwp. */
4748 free (buf);
4749 return 0;
4750 }
4751 else
4752 {
4753 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4754 }
4755 }
4756 else if (regset->type == GENERAL_REGS)
4757 saw_general_regs = 1;
4758 free (buf);
4759 }
4760 if (saw_general_regs)
4761 return 0;
4762 else
4763 return 1;
4764 }
4765
4766 #else /* !HAVE_LINUX_REGSETS */
4767
4768 #define use_linux_regsets 0
4769 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4770 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4771
4772 #endif
4773
4774 /* Return 1 if register REGNO is supported by one of the regset ptrace
4775 calls or 0 if it has to be transferred individually. */
4776
4777 static int
4778 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4779 {
4780 unsigned char mask = 1 << (regno % 8);
4781 size_t index = regno / 8;
4782
4783 return (use_linux_regsets
4784 && (regs_info->regset_bitmap == NULL
4785 || (regs_info->regset_bitmap[index] & mask) != 0));
4786 }
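/* Example: for REGNO 10, INDEX is 1 and MASK is 1 << 2 == 0x04, so
   bit 2 of regset_bitmap[1] says whether register 10 is covered by
   some regset.  */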
4787
4788 #ifdef HAVE_LINUX_USRREGS
4789
4790 int
4791 register_addr (const struct usrregs_info *usrregs, int regnum)
4792 {
4793 int addr;
4794
4795 if (regnum < 0 || regnum >= usrregs->num_regs)
4796 error ("Invalid register number %d.", regnum);
4797
4798 addr = usrregs->regmap[regnum];
4799
4800 return addr;
4801 }
4802
4803 /* Fetch one register. */
4804 static void
4805 fetch_register (const struct usrregs_info *usrregs,
4806 struct regcache *regcache, int regno)
4807 {
4808 CORE_ADDR regaddr;
4809 int i, size;
4810 char *buf;
4811 int pid;
4812
4813 if (regno >= usrregs->num_regs)
4814 return;
4815 if ((*the_low_target.cannot_fetch_register) (regno))
4816 return;
4817
4818 regaddr = register_addr (usrregs, regno);
4819 if (regaddr == -1)
4820 return;
4821
4822 size = ((register_size (regcache->tdesc, regno)
4823 + sizeof (PTRACE_XFER_TYPE) - 1)
4824 & -sizeof (PTRACE_XFER_TYPE));
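  /* E.g., a 10-byte register with an 8-byte PTRACE_XFER_TYPE rounds
     SIZE up to 16, so the peek loop below only ever transfers whole
     words.  */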
4825 buf = alloca (size);
4826
4827 pid = lwpid_of (current_thread);
4828 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4829 {
4830 errno = 0;
4831 *(PTRACE_XFER_TYPE *) (buf + i) =
4832 ptrace (PTRACE_PEEKUSER, pid,
4833 /* Coerce to a uintptr_t first to avoid potential gcc warning
4834 of coercing an 8 byte integer to a 4 byte pointer. */
4835 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4836 regaddr += sizeof (PTRACE_XFER_TYPE);
4837 if (errno != 0)
4838 error ("reading register %d: %s", regno, strerror (errno));
4839 }
4840
4841 if (the_low_target.supply_ptrace_register)
4842 the_low_target.supply_ptrace_register (regcache, regno, buf);
4843 else
4844 supply_register (regcache, regno, buf);
4845 }
4846
4847 /* Store one register. */
4848 static void
4849 store_register (const struct usrregs_info *usrregs,
4850 struct regcache *regcache, int regno)
4851 {
4852 CORE_ADDR regaddr;
4853 int i, size;
4854 char *buf;
4855 int pid;
4856
4857 if (regno >= usrregs->num_regs)
4858 return;
4859 if ((*the_low_target.cannot_store_register) (regno))
4860 return;
4861
4862 regaddr = register_addr (usrregs, regno);
4863 if (regaddr == -1)
4864 return;
4865
4866 size = ((register_size (regcache->tdesc, regno)
4867 + sizeof (PTRACE_XFER_TYPE) - 1)
4868 & -sizeof (PTRACE_XFER_TYPE));
4869 buf = alloca (size);
4870 memset (buf, 0, size);
4871
4872 if (the_low_target.collect_ptrace_register)
4873 the_low_target.collect_ptrace_register (regcache, regno, buf);
4874 else
4875 collect_register (regcache, regno, buf);
4876
4877 pid = lwpid_of (current_thread);
4878 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4879 {
4880 errno = 0;
4881 ptrace (PTRACE_POKEUSER, pid,
4882 /* Coerce to a uintptr_t first to avoid potential gcc warning
4883 about coercing an 8 byte integer to a 4 byte pointer. */
4884 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4885 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4886 if (errno != 0)
4887 {
4888 /* At this point, ESRCH should mean the process is
4889 already gone, in which case we simply ignore attempts
4890 to change its registers. See also the related
4891 comment in linux_resume_one_lwp. */
4892 if (errno == ESRCH)
4893 return;
4894
4895 if ((*the_low_target.cannot_store_register) (regno) == 0)
4896 error ("writing register %d: %s", regno, strerror (errno));
4897 }
4898 regaddr += sizeof (PTRACE_XFER_TYPE);
4899 }
4900 }
4901
4902 /* Fetch all registers, or just one, from the child process.
4903 If REGNO is -1, do this for all registers, skipping any that are
4904 assumed to have been retrieved by regsets_fetch_inferior_registers,
4905 unless ALL is non-zero.
4906 Otherwise, REGNO specifies which register (so we can save time). */
4907 static void
4908 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4909 struct regcache *regcache, int regno, int all)
4910 {
4911 struct usrregs_info *usr = regs_info->usrregs;
4912
4913 if (regno == -1)
4914 {
4915 for (regno = 0; regno < usr->num_regs; regno++)
4916 if (all || !linux_register_in_regsets (regs_info, regno))
4917 fetch_register (usr, regcache, regno);
4918 }
4919 else
4920 fetch_register (usr, regcache, regno);
4921 }
4922
4923 /* Store our register values back into the inferior.
4924 If REGNO is -1, do this for all registers, skipping any that are
4925 assumed to have been saved by regsets_store_inferior_registers,
4926 unless ALL is non-zero.
4927 Otherwise, REGNO specifies which register (so we can save time). */
4928 static void
4929 usr_store_inferior_registers (const struct regs_info *regs_info,
4930 struct regcache *regcache, int regno, int all)
4931 {
4932 struct usrregs_info *usr = regs_info->usrregs;
4933
4934 if (regno == -1)
4935 {
4936 for (regno = 0; regno < usr->num_regs; regno++)
4937 if (all || !linux_register_in_regsets (regs_info, regno))
4938 store_register (usr, regcache, regno);
4939 }
4940 else
4941 store_register (usr, regcache, regno);
4942 }
4943
4944 #else /* !HAVE_LINUX_USRREGS */
4945
4946 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4947 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4948
4949 #endif
4950
4951
4952 void
4953 linux_fetch_registers (struct regcache *regcache, int regno)
4954 {
4955 int use_regsets;
4956 int all = 0;
4957 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4958
4959 if (regno == -1)
4960 {
4961 if (the_low_target.fetch_register != NULL
4962 && regs_info->usrregs != NULL)
4963 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4964 (*the_low_target.fetch_register) (regcache, regno);
4965
4966 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4967 if (regs_info->usrregs != NULL)
4968 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4969 }
4970 else
4971 {
4972 if (the_low_target.fetch_register != NULL
4973 && (*the_low_target.fetch_register) (regcache, regno))
4974 return;
4975
4976 use_regsets = linux_register_in_regsets (regs_info, regno);
4977 if (use_regsets)
4978 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4979 regcache);
4980 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4981 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4982 }
4983 }
4984
4985 void
4986 linux_store_registers (struct regcache *regcache, int regno)
4987 {
4988 int use_regsets;
4989 int all = 0;
4990 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4991
4992 if (regno == -1)
4993 {
4994 all = regsets_store_inferior_registers (regs_info->regsets_info,
4995 regcache);
4996 if (regs_info->usrregs != NULL)
4997 usr_store_inferior_registers (regs_info, regcache, regno, all);
4998 }
4999 else
5000 {
5001 use_regsets = linux_register_in_regsets (regs_info, regno);
5002 if (use_regsets)
5003 all = regsets_store_inferior_registers (regs_info->regsets_info,
5004 regcache);
5005 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5006 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5007 }
5008 }
5009
5010
5011 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5012 to debugger memory starting at MYADDR. */
5013
5014 static int
5015 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5016 {
5017 int pid = lwpid_of (current_thread);
5018 register PTRACE_XFER_TYPE *buffer;
5019 register CORE_ADDR addr;
5020 register int count;
5021 char filename[64];
5022 register int i;
5023 int ret;
5024 int fd;
5025
5026 /* Try using /proc. Don't bother for one word. */
5027 if (len >= 3 * sizeof (long))
5028 {
5029 int bytes;
5030
5031 /* We could keep this file open and cache it - possibly one per
5032 thread. That requires some juggling, but is even faster. */
5033 sprintf (filename, "/proc/%d/mem", pid);
5034 fd = open (filename, O_RDONLY | O_LARGEFILE);
5035 if (fd == -1)
5036 goto no_proc;
5037
5038 /* If pread64 is available, use it. It's faster if the kernel
5039 supports it (only one syscall), and it's 64-bit safe even on
5040 32-bit platforms (for instance, SPARC debugging a SPARC64
5041 application). */
5042 #ifdef HAVE_PREAD64
5043 bytes = pread64 (fd, myaddr, len, memaddr);
5044 #else
5045 bytes = -1;
5046 if (lseek (fd, memaddr, SEEK_SET) != -1)
5047 bytes = read (fd, myaddr, len);
5048 #endif
5049
5050 close (fd);
5051 if (bytes == len)
5052 return 0;
5053
5054 /* Some data was read; we'll try to get the rest with ptrace. */
5055 if (bytes > 0)
5056 {
5057 memaddr += bytes;
5058 myaddr += bytes;
5059 len -= bytes;
5060 }
5061 }
5062
5063 no_proc:
5064 /* Round starting address down to longword boundary. */
5065 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5066 /* Round ending address up; get number of longwords that makes. */
5067 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5068 / sizeof (PTRACE_XFER_TYPE));
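  /* E.g., with an 8-byte PTRACE_XFER_TYPE, a 6-byte read at 0x1006
     rounds ADDR down to 0x1000 and COUNT up to 2, covering
     0x1000..0x100f.  */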
5069 /* Allocate buffer of that many longwords. */
5070 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5071
5072 /* Read all the longwords */
5073 errno = 0;
5074 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5075 {
5076 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5077 about coercing an 8 byte integer to a 4 byte pointer. */
5078 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5079 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5080 (PTRACE_TYPE_ARG4) 0);
5081 if (errno)
5082 break;
5083 }
5084 ret = errno;
5085
5086 /* Copy appropriate bytes out of the buffer. */
5087 if (i > 0)
5088 {
5089 i *= sizeof (PTRACE_XFER_TYPE);
5090 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5091 memcpy (myaddr,
5092 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5093 i < len ? i : len);
5094 }
5095
5096 return ret;
5097 }
5098
5099 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5100 memory at MEMADDR. On failure (cannot write to the inferior)
5101 returns the value of errno. Always succeeds if LEN is zero. */
5102
5103 static int
5104 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5105 {
5106 register int i;
5107 /* Round starting address down to longword boundary. */
5108 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5109 /* Round ending address up; get number of longwords that makes. */
5110 register int count
5111 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5112 / sizeof (PTRACE_XFER_TYPE);
5113
5114 /* Allocate buffer of that many longwords. */
5115 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5116 alloca (count * sizeof (PTRACE_XFER_TYPE));
5117
5118 int pid = lwpid_of (current_thread);
5119
5120 if (len == 0)
5121 {
5122 /* Zero length write always succeeds. */
5123 return 0;
5124 }
5125
5126 if (debug_threads)
5127 {
5128 /* Dump up to four bytes. */
5129 unsigned int val = * (unsigned int *) myaddr;
5130 if (len == 1)
5131 val = val & 0xff;
5132 else if (len == 2)
5133 val = val & 0xffff;
5134 else if (len == 3)
5135 val = val & 0xffffff;
5136 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5137 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5138 }
5139
5140 /* Fill start and end extra bytes of buffer with existing memory data. */
5141
5142 errno = 0;
5143 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5144 about coercing an 8 byte integer to a 4 byte pointer. */
5145 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5146 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5147 (PTRACE_TYPE_ARG4) 0);
5148 if (errno)
5149 return errno;
5150
5151 if (count > 1)
5152 {
5153 errno = 0;
5154 buffer[count - 1]
5155 = ptrace (PTRACE_PEEKTEXT, pid,
5156 /* Coerce to a uintptr_t first to avoid potential gcc warning
5157 about coercing an 8 byte integer to a 4 byte pointer. */
5158 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5159 * sizeof (PTRACE_XFER_TYPE)),
5160 (PTRACE_TYPE_ARG4) 0);
5161 if (errno)
5162 return errno;
5163 }
5164
5165 /* Copy data to be written over corresponding part of buffer. */
5166
5167 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5168 myaddr, len);
5169
5170 /* Write the entire buffer. */
5171
5172 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5173 {
5174 errno = 0;
5175 ptrace (PTRACE_POKETEXT, pid,
5176 /* Coerce to a uintptr_t first to avoid potential gcc warning
5177 about coercing an 8 byte integer to a 4 byte pointer. */
5178 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5179 (PTRACE_TYPE_ARG4) buffer[i]);
5180 if (errno)
5181 return errno;
5182 }
5183
5184 return 0;
5185 }
5186
5187 static void
5188 linux_look_up_symbols (void)
5189 {
5190 #ifdef USE_THREAD_DB
5191 struct process_info *proc = current_process ();
5192
5193 if (proc->priv->thread_db != NULL)
5194 return;
5195
5196 /* If the kernel supports tracing clones, then we don't need to
5197 use the magic thread event breakpoint to learn about
5198 threads. */
5199 thread_db_init (!linux_supports_traceclone ());
5200 #endif
5201 }
5202
5203 static void
5204 linux_request_interrupt (void)
5205 {
5206 extern unsigned long signal_pid;
5207
5208 /* Send a SIGINT to the process group. This acts just like the user
5209 typed a ^C on the controlling terminal. */
5210 kill (-signal_pid, SIGINT);
5211 }
5212
5213 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5214 to debugger memory starting at MYADDR. */
5215
5216 static int
5217 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5218 {
5219 char filename[PATH_MAX];
5220 int fd, n;
5221 int pid = lwpid_of (current_thread);
5222
5223 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5224
5225 fd = open (filename, O_RDONLY);
5226 if (fd < 0)
5227 return -1;
5228
5229 if (offset != (CORE_ADDR) 0
5230 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5231 n = -1;
5232 else
5233 n = read (fd, myaddr, len);
5234
5235 close (fd);
5236
5237 return n;
5238 }
5239
5240 /* These breakpoint and watchpoint related wrapper functions simply
5241 pass on the function call if the target has registered a
5242 corresponding function. */
5243
5244 static int
5245 linux_supports_z_point_type (char z_type)
5246 {
5247 return (the_low_target.supports_z_point_type != NULL
5248 && the_low_target.supports_z_point_type (z_type));
5249 }
5250
5251 static int
5252 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5253 int size, struct raw_breakpoint *bp)
5254 {
5255 if (type == raw_bkpt_type_sw)
5256 return insert_memory_breakpoint (bp);
5257 else if (the_low_target.insert_point != NULL)
5258 return the_low_target.insert_point (type, addr, size, bp);
5259 else
5260 /* Unsupported (see target.h). */
5261 return 1;
5262 }
5263
5264 static int
5265 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5266 int size, struct raw_breakpoint *bp)
5267 {
5268 if (type == raw_bkpt_type_sw)
5269 return remove_memory_breakpoint (bp);
5270 else if (the_low_target.remove_point != NULL)
5271 return the_low_target.remove_point (type, addr, size, bp);
5272 else
5273 /* Unsupported (see target.h). */
5274 return 1;
5275 }
5276
5277 /* Implement the to_stopped_by_sw_breakpoint target_ops
5278 method. */
5279
5280 static int
5281 linux_stopped_by_sw_breakpoint (void)
5282 {
5283 struct lwp_info *lwp = get_thread_lwp (current_thread);
5284
5285 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5286 }
5287
5288 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5289 method. */
5290
5291 static int
5292 linux_supports_stopped_by_sw_breakpoint (void)
5293 {
5294 return USE_SIGTRAP_SIGINFO;
5295 }
5296
5297 /* Implement the to_stopped_by_hw_breakpoint target_ops
5298 method. */
5299
5300 static int
5301 linux_stopped_by_hw_breakpoint (void)
5302 {
5303 struct lwp_info *lwp = get_thread_lwp (current_thread);
5304
5305 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5306 }
5307
5308 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5309 method. */
5310
5311 static int
5312 linux_supports_stopped_by_hw_breakpoint (void)
5313 {
5314 return USE_SIGTRAP_SIGINFO;
5315 }
5316
5317 /* Implement the supports_conditional_breakpoints target_ops
5318 method. */
5319
5320 static int
5321 linux_supports_conditional_breakpoints (void)
5322 {
5323 /* GDBserver needs to step over the breakpoint if the condition is
5324 false. GDBserver software single step is too simple, so disable
5325 conditional breakpoints if the target doesn't have hardware single
5326 step. */
5327 return can_hardware_single_step ();
5328 }
5329
5330 static int
5331 linux_stopped_by_watchpoint (void)
5332 {
5333 struct lwp_info *lwp = get_thread_lwp (current_thread);
5334
5335 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5336 }
5337
5338 static CORE_ADDR
5339 linux_stopped_data_address (void)
5340 {
5341 struct lwp_info *lwp = get_thread_lwp (current_thread);
5342
5343 return lwp->stopped_data_address;
5344 }
5345
5346 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5347 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5348 && defined(PT_TEXT_END_ADDR)
5349
5350 /* This is only used for targets that define PT_TEXT_ADDR,
5351 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5352 the target has different ways of acquiring this information, like
5353 loadmaps. */
5354
5355 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5356 to tell gdb about. */
5357
5358 static int
5359 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5360 {
5361 unsigned long text, text_end, data;
5362 int pid = lwpid_of (current_thread);
5363
5364 errno = 0;
5365
5366 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5367 (PTRACE_TYPE_ARG4) 0);
5368 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5369 (PTRACE_TYPE_ARG4) 0);
5370 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5371 (PTRACE_TYPE_ARG4) 0);
5372
5373 if (errno == 0)
5374 {
5375 /* Both text and data offsets produced at compile-time (and so
5376 used by gdb) are relative to the beginning of the program,
5377 with the data segment immediately following the text segment.
5378 However, the actual runtime layout in memory may put the data
5379 somewhere else, so when we send gdb a data base-address, we
5380 use the real data base address and subtract the compile-time
5381 data base-address from it (which is just the length of the
5382 text segment). BSS immediately follows data in both
5383 cases. */
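  /* Worked example: with text at 0x10000000, text_end at 0x10008000
     and data at 0x20000000, the text segment is 0x8000 bytes, so we
     report *data_p == 0x1fff8000; gdb adds the compile-time data
     offset (0x8000, the text length) back and arrives at the real
     0x20000000.  */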
5384 *text_p = text;
5385 *data_p = data - (text_end - text);
5386
5387 return 1;
5388 }
5389 return 0;
5390 }
5391 #endif
5392
5393 static int
5394 linux_qxfer_osdata (const char *annex,
5395 unsigned char *readbuf, unsigned const char *writebuf,
5396 CORE_ADDR offset, int len)
5397 {
5398 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5399 }
5400
5401 /* Convert a native/host siginfo object into/from the siginfo in the
5402 layout of the inferior's architecture. */
5403
5404 static void
5405 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5406 {
5407 int done = 0;
5408
5409 if (the_low_target.siginfo_fixup != NULL)
5410 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5411
5412 /* If there was no callback, or the callback didn't do anything,
5413 then just do a straight memcpy. */
5414 if (!done)
5415 {
5416 if (direction == 1)
5417 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5418 else
5419 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5420 }
5421 }
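/* Direction convention, per the default path above: DIRECTION == 0
   copies the native SIGINFO out into the inferior-layout buffer
   INF_SIGINFO (the read side), while DIRECTION == 1 copies
   INF_SIGINFO back into the native SIGINFO before it is flushed with
   PTRACE_SETSIGINFO (the write side).  See linux_xfer_siginfo.  */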
5422
5423 static int
5424 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5425 unsigned const char *writebuf, CORE_ADDR offset, int len)
5426 {
5427 int pid;
5428 siginfo_t siginfo;
5429 char inf_siginfo[sizeof (siginfo_t)];
5430
5431 if (current_thread == NULL)
5432 return -1;
5433
5434 pid = lwpid_of (current_thread);
5435
5436 if (debug_threads)
5437 debug_printf ("%s siginfo for lwp %d.\n",
5438 readbuf != NULL ? "Reading" : "Writing",
5439 pid);
5440
5441 if (offset >= sizeof (siginfo))
5442 return -1;
5443
5444 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5445 return -1;
5446
5447 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5448 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5449 inferior with a 64-bit GDBSERVER should look the same as debugging it
5450 with a 32-bit GDBSERVER, we need to convert it. */
5451 siginfo_fixup (&siginfo, inf_siginfo, 0);
5452
5453 if (offset + len > sizeof (siginfo))
5454 len = sizeof (siginfo) - offset;
5455
5456 if (readbuf != NULL)
5457 memcpy (readbuf, inf_siginfo + offset, len);
5458 else
5459 {
5460 memcpy (inf_siginfo + offset, writebuf, len);
5461
5462 /* Convert back to ptrace layout before flushing it out. */
5463 siginfo_fixup (&siginfo, inf_siginfo, 1);
5464
5465 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5466 return -1;
5467 }
5468
5469 return len;
5470 }
5471
5472 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5473 it lets us notice when children change state; and it acts as the
5474 handler for the sigsuspend in my_waitpid. */
5475
5476 static void
5477 sigchld_handler (int signo)
5478 {
5479 int old_errno = errno;
5480
5481 if (debug_threads)
5482 {
5483 do
5484 {
5485 /* fprintf is not async-signal-safe, so call write
5486 directly. */
5487 if (write (2, "sigchld_handler\n",
5488 sizeof ("sigchld_handler\n") - 1) < 0)
5489 break; /* just ignore */
5490 } while (0);
5491 }
5492
5493 if (target_is_async_p ())
5494 async_file_mark (); /* trigger a linux_wait */
5495
5496 errno = old_errno;
5497 }
5498
5499 static int
5500 linux_supports_non_stop (void)
5501 {
5502 return 1;
5503 }
5504
5505 static int
5506 linux_async (int enable)
5507 {
5508 int previous = target_is_async_p ();
5509
5510 if (debug_threads)
5511 debug_printf ("linux_async (%d), previous=%d\n",
5512 enable, previous);
5513
5514 if (previous != enable)
5515 {
5516 sigset_t mask;
5517 sigemptyset (&mask);
5518 sigaddset (&mask, SIGCHLD);
5519
5520 sigprocmask (SIG_BLOCK, &mask, NULL);
5521
5522 if (enable)
5523 {
5524 if (pipe (linux_event_pipe) == -1)
5525 {
5526 linux_event_pipe[0] = -1;
5527 linux_event_pipe[1] = -1;
5528 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5529
5530 warning ("creating event pipe failed.");
5531 return previous;
5532 }
5533
5534 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5535 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5536
5537 /* Register the event loop handler. */
5538 add_file_handler (linux_event_pipe[0],
5539 handle_target_event, NULL);
5540
5541 /* Always trigger a linux_wait. */
5542 async_file_mark ();
5543 }
5544 else
5545 {
5546 delete_file_handler (linux_event_pipe[0]);
5547
5548 close (linux_event_pipe[0]);
5549 close (linux_event_pipe[1]);
5550 linux_event_pipe[0] = -1;
5551 linux_event_pipe[1] = -1;
5552 }
5553
5554 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5555 }
5556
5557 return previous;
5558 }
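
/* The event pipe above is the classic self-pipe trick.  A stripped
   down sketch of the same pattern (example only; names hypothetical),
   independent of gdbserver:  */
#if 0
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>

static int example_pipe[2];

static void
example_sigchld (int signo)
{
  /* write is async-signal-safe; with O_NONBLOCK a full pipe just
     drops a redundant wakeup.  */
  if (write (example_pipe[1], "+", 1) < 0)
    ; /* Ignore.  */
}

static void
example_setup (void)
{
  if (pipe (example_pipe) != 0)
    return;
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, example_sigchld);
  /* An event loop then select()s or poll()s on example_pipe[0] and
     drains it before reaping children with waitpid.  */
}
#endif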
5559
5560 static int
5561 linux_start_non_stop (int nonstop)
5562 {
5563 /* Register or unregister from event-loop accordingly. */
5564 linux_async (nonstop);
5565
5566 if (target_is_async_p () != (nonstop != 0))
5567 return -1;
5568
5569 return 0;
5570 }
5571
5572 static int
5573 linux_supports_multi_process (void)
5574 {
5575 return 1;
5576 }
5577
5578 /* Check if fork events are supported. */
5579
5580 static int
5581 linux_supports_fork_events (void)
5582 {
5583 return linux_supports_tracefork ();
5584 }
5585
5586 /* Check if vfork events are supported. */
5587
5588 static int
5589 linux_supports_vfork_events (void)
5590 {
5591 return linux_supports_tracefork ();
5592 }
5593
5594 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5595 options for the specified lwp. */
5596
5597 static int
5598 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5599 void *args)
5600 {
5601 struct thread_info *thread = (struct thread_info *) entry;
5602 struct lwp_info *lwp = get_thread_lwp (thread);
5603
5604 if (!lwp->stopped)
5605 {
5606 /* Stop the lwp so we can modify its ptrace options. */
5607 lwp->must_set_ptrace_flags = 1;
5608 linux_stop_lwp (lwp);
5609 }
5610 else
5611 {
5612 /* Already stopped; go ahead and set the ptrace options. */
5613 struct process_info *proc = find_process_pid (pid_of (thread));
5614 int options = linux_low_ptrace_options (proc->attached);
5615
5616 linux_enable_event_reporting (lwpid_of (thread), options);
5617 lwp->must_set_ptrace_flags = 0;
5618 }
5619
5620 return 0;
5621 }
5622
5623 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5624 ptrace flags for all inferiors. This is in case the new GDB connection
5625 doesn't support the same set of events that the previous one did. */
5626
5627 static void
5628 linux_handle_new_gdb_connection (void)
5629 {
5630 pid_t pid;
5631
5632 /* Request that all the lwps reset their ptrace options. */
5633 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5634 }
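
/* Example only: at the kernel interface, resetting the options comes
   down to a PTRACE_SETOPTIONS call of this general shape for each
   stopped lwp (the flag set shown is illustrative):  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>

static long
example_set_options (pid_t pid)
{
  long options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
                 | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC;

  return ptrace (PTRACE_SETOPTIONS, pid, (void *) 0, (void *) options);
}
#endif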
5635
5636 static int
5637 linux_supports_disable_randomization (void)
5638 {
5639 #ifdef HAVE_PERSONALITY
5640 return 1;
5641 #else
5642 return 0;
5643 #endif
5644 }
5645
5646 static int
5647 linux_supports_agent (void)
5648 {
5649 return 1;
5650 }
5651
5652 static int
5653 linux_supports_range_stepping (void)
5654 {
5655 if (*the_low_target.supports_range_stepping == NULL)
5656 return 0;
5657
5658 return (*the_low_target.supports_range_stepping) ();
5659 }
5660
5661 /* Enumerate spufs IDs for process PID. */
5662 static int
5663 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5664 {
5665 int pos = 0;
5666 int written = 0;
5667 char path[128];
5668 DIR *dir;
5669 struct dirent *entry;
5670
5671 sprintf (path, "/proc/%ld/fd", pid);
5672 dir = opendir (path);
5673 if (!dir)
5674 return -1;
5675
5676 rewinddir (dir);
5677 while ((entry = readdir (dir)) != NULL)
5678 {
5679 struct stat st;
5680 struct statfs stfs;
5681 int fd;
5682
5683 fd = atoi (entry->d_name);
5684 if (!fd)
5685 continue;
5686
5687 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5688 if (stat (path, &st) != 0)
5689 continue;
5690 if (!S_ISDIR (st.st_mode))
5691 continue;
5692
5693 if (statfs (path, &stfs) != 0)
5694 continue;
5695 if (stfs.f_type != SPUFS_MAGIC)
5696 continue;
5697
5698 if (pos >= offset && pos + 4 <= offset + len)
5699 {
5700 *(unsigned int *)(buf + pos - offset) = fd;
5701 written += 4;
5702 }
5703 pos += 4;
5704 }
5705
5706 closedir (dir);
5707 return written;
5708 }
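
/* Worked example of the OFFSET/LEN windowing above: with OFFSET 4 and
   LEN 8, the ID found at POS 0 is skipped, the IDs at POS 4 and POS 8
   land at BUF[0] and BUF[4], anything past POS 8 no longer fits, and
   WRITTEN comes back as 8 even though POS kept counting every context
   found.  */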
5709
5710 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5711 object type, using the /proc file system. */
5712 static int
5713 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5714 unsigned const char *writebuf,
5715 CORE_ADDR offset, int len)
5716 {
5717 long pid = lwpid_of (current_thread);
5718 char buf[128];
5719 int fd = 0;
5720 int ret = 0;
5721
5722 if (!writebuf && !readbuf)
5723 return -1;
5724
5725 if (!*annex)
5726 {
5727 if (!readbuf)
5728 return -1;
5729 else
5730 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5731 }
5732
5733 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5734 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5735 if (fd <= 0)
5736 return -1;
5737
5738 if (offset != 0
5739 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5740 {
5741 close (fd);
5742 return 0;
5743 }
5744
5745 if (writebuf)
5746 ret = write (fd, writebuf, (size_t) len);
5747 else
5748 ret = read (fd, readbuf, (size_t) len);
5749
5750 close (fd);
5751 return ret;
5752 }
5753
5754 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5755 struct target_loadseg
5756 {
5757 /* Core address to which the segment is mapped. */
5758 Elf32_Addr addr;
5759 /* VMA recorded in the program header. */
5760 Elf32_Addr p_vaddr;
5761 /* Size of this segment in memory. */
5762 Elf32_Word p_memsz;
5763 };
5764
5765 # if defined PT_GETDSBT
5766 struct target_loadmap
5767 {
5768 /* Protocol version number, must be zero. */
5769 Elf32_Word version;
5770 /* Pointer to the DSBT table, its size, and the DSBT index. */
5771 unsigned *dsbt_table;
5772 unsigned dsbt_size, dsbt_index;
5773 /* Number of segments in this map. */
5774 Elf32_Word nsegs;
5775 /* The actual memory map. */
5776 struct target_loadseg segs[/*nsegs*/];
5777 };
5778 # define LINUX_LOADMAP PT_GETDSBT
5779 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5780 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5781 # else
5782 struct target_loadmap
5783 {
5784 /* Protocol version number, must be zero. */
5785 Elf32_Half version;
5786 /* Number of segments in this map. */
5787 Elf32_Half nsegs;
5788 /* The actual memory map. */
5789 struct target_loadseg segs[/*nsegs*/];
5790 };
5791 # define LINUX_LOADMAP PTRACE_GETFDPIC
5792 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5793 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5794 # endif
5795
5796 static int
5797 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5798 unsigned char *myaddr, unsigned int len)
5799 {
5800 int pid = lwpid_of (current_thread);
5801 int addr = -1;
5802 struct target_loadmap *data = NULL;
5803 unsigned int actual_length, copy_length;
5804
5805 if (strcmp (annex, "exec") == 0)
5806 addr = (int) LINUX_LOADMAP_EXEC;
5807 else if (strcmp (annex, "interp") == 0)
5808 addr = (int) LINUX_LOADMAP_INTERP;
5809 else
5810 return -1;
5811
5812 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5813 return -1;
5814
5815 if (data == NULL)
5816 return -1;
5817
5818 actual_length = sizeof (struct target_loadmap)
5819 + sizeof (struct target_loadseg) * data->nsegs;
5820
5821 if (offset < 0 || offset > actual_length)
5822 return -1;
5823
5824 copy_length = actual_length - offset < len ? actual_length - offset : len;
5825 memcpy (myaddr, (char *) data + offset, copy_length);
5826 return copy_length;
5827 }
5828 #else
5829 # define linux_read_loadmap NULL
5830 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5831
5832 static void
5833 linux_process_qsupported (const char *query)
5834 {
5835 if (the_low_target.process_qsupported != NULL)
5836 the_low_target.process_qsupported (query);
5837 }
5838
5839 static int
5840 linux_supports_tracepoints (void)
5841 {
5842 if (*the_low_target.supports_tracepoints == NULL)
5843 return 0;
5844
5845 return (*the_low_target.supports_tracepoints) ();
5846 }
5847
5848 static CORE_ADDR
5849 linux_read_pc (struct regcache *regcache)
5850 {
5851 if (the_low_target.get_pc == NULL)
5852 return 0;
5853
5854 return (*the_low_target.get_pc) (regcache);
5855 }
5856
5857 static void
5858 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5859 {
5860 gdb_assert (the_low_target.set_pc != NULL);
5861
5862 (*the_low_target.set_pc) (regcache, pc);
5863 }
5864
5865 static int
5866 linux_thread_stopped (struct thread_info *thread)
5867 {
5868 return get_thread_lwp (thread)->stopped;
5869 }
5870
5871 /* This exposes stop-all-threads functionality to other modules. */
5872
5873 static void
5874 linux_pause_all (int freeze)
5875 {
5876 stop_all_lwps (freeze, NULL);
5877 }
5878
5879 /* This exposes unstop-all-threads functionality to other gdbserver
5880 modules. */
5881
5882 static void
5883 linux_unpause_all (int unfreeze)
5884 {
5885 unstop_all_lwps (unfreeze, NULL);
5886 }
5887
5888 static int
5889 linux_prepare_to_access_memory (void)
5890 {
5891 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5892 running LWP.  */
5893 if (non_stop)
5894 linux_pause_all (1);
5895 return 0;
5896 }
5897
5898 static void
5899 linux_done_accessing_memory (void)
5900 {
5901 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5902 running LWP.  */
5903 if (non_stop)
5904 linux_unpause_all (1);
5905 }
5906
5907 static int
5908 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5909 CORE_ADDR collector,
5910 CORE_ADDR lockaddr,
5911 ULONGEST orig_size,
5912 CORE_ADDR *jump_entry,
5913 CORE_ADDR *trampoline,
5914 ULONGEST *trampoline_size,
5915 unsigned char *jjump_pad_insn,
5916 ULONGEST *jjump_pad_insn_size,
5917 CORE_ADDR *adjusted_insn_addr,
5918 CORE_ADDR *adjusted_insn_addr_end,
5919 char *err)
5920 {
5921 return (*the_low_target.install_fast_tracepoint_jump_pad)
5922 (tpoint, tpaddr, collector, lockaddr, orig_size,
5923 jump_entry, trampoline, trampoline_size,
5924 jjump_pad_insn, jjump_pad_insn_size,
5925 adjusted_insn_addr, adjusted_insn_addr_end,
5926 err);
5927 }
5928
5929 static struct emit_ops *
5930 linux_emit_ops (void)
5931 {
5932 if (the_low_target.emit_ops != NULL)
5933 return (*the_low_target.emit_ops) ();
5934 else
5935 return NULL;
5936 }
5937
5938 static int
5939 linux_get_min_fast_tracepoint_insn_len (void)
5940 {
5941 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5942 }
5943
5944 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5945
5946 static int
5947 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5948 CORE_ADDR *phdr_memaddr, int *num_phdr)
5949 {
5950 char filename[PATH_MAX];
5951 int fd;
5952 const int auxv_size = is_elf64
5953 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5954 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5955
5956 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5957
5958 fd = open (filename, O_RDONLY);
5959 if (fd < 0)
5960 return 1;
5961
5962 *phdr_memaddr = 0;
5963 *num_phdr = 0;
5964 while (read (fd, buf, auxv_size) == auxv_size
5965 && (*phdr_memaddr == 0 || *num_phdr == 0))
5966 {
5967 if (is_elf64)
5968 {
5969 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5970
5971 switch (aux->a_type)
5972 {
5973 case AT_PHDR:
5974 *phdr_memaddr = aux->a_un.a_val;
5975 break;
5976 case AT_PHNUM:
5977 *num_phdr = aux->a_un.a_val;
5978 break;
5979 }
5980 }
5981 else
5982 {
5983 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5984
5985 switch (aux->a_type)
5986 {
5987 case AT_PHDR:
5988 *phdr_memaddr = aux->a_un.a_val;
5989 break;
5990 case AT_PHNUM:
5991 *num_phdr = aux->a_un.a_val;
5992 break;
5993 }
5994 }
5995 }
5996
5997 close (fd);
5998
5999 if (*phdr_memaddr == 0 || *num_phdr == 0)
6000 {
6001 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6002 "phdr_memaddr = %ld, phdr_num = %d",
6003 (long) *phdr_memaddr, *num_phdr);
6004 return 2;
6005 }
6006
6007 return 0;
6008 }
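
/* Example only: a process can fetch the same two auxv entries for
   itself with getauxval (glibc 2.16 and later), without parsing
   /proc/PID/auxv by hand:  */
#if 0
#include <sys/auxv.h>
#include <stdio.h>

int
main (void)
{
  printf ("AT_PHDR  = %#lx\n", getauxval (AT_PHDR));
  printf ("AT_PHNUM = %lu\n", getauxval (AT_PHNUM));
  return 0;
}
#endif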
6009
6010 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6011
6012 static CORE_ADDR
6013 get_dynamic (const int pid, const int is_elf64)
6014 {
6015 CORE_ADDR phdr_memaddr, relocation;
6016 int num_phdr, i;
6017 unsigned char *phdr_buf;
6018 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6019
6020 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6021 return 0;
6022
6023 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6024 phdr_buf = alloca (num_phdr * phdr_size);
6025
6026 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6027 return 0;
6028
6029 /* Compute relocation: it is expected to be 0 for "regular" executables,
6030 non-zero for PIE ones. */
6031 relocation = -1;
6032 for (i = 0; relocation == -1 && i < num_phdr; i++)
6033 if (is_elf64)
6034 {
6035 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6036
6037 if (p->p_type == PT_PHDR)
6038 relocation = phdr_memaddr - p->p_vaddr;
6039 }
6040 else
6041 {
6042 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6043
6044 if (p->p_type == PT_PHDR)
6045 relocation = phdr_memaddr - p->p_vaddr;
6046 }
6047
6048 if (relocation == -1)
6049 {
6050 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
6051 all real-world executables, including PIE executables, always have
6052 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6053 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6054 provides DT_DEBUG anyway (fpc binaries are statically linked).
6055 
6056 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6057 
6058 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
6059
6060 return 0;
6061 }
6062
6063 for (i = 0; i < num_phdr; i++)
6064 {
6065 if (is_elf64)
6066 {
6067 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6068
6069 if (p->p_type == PT_DYNAMIC)
6070 return p->p_vaddr + relocation;
6071 }
6072 else
6073 {
6074 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6075
6076 if (p->p_type == PT_DYNAMIC)
6077 return p->p_vaddr + relocation;
6078 }
6079 }
6080
6081 return 0;
6082 }
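
/* Worked example with hypothetical values: for a PIE whose PT_PHDR
   has p_vaddr 0x40 and whose auxv reports AT_PHDR as 0x555555554040,
   the relocation is 0x555555554040 - 0x40 = 0x555555554000, which is
   then added to PT_DYNAMIC's p_vaddr.  For a non-PIE executable
   AT_PHDR equals the recorded p_vaddr and the relocation is 0.  */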
6083
6084 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6085 can be 0 if the inferior does not yet have the library list initialized.
6086 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6087 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6088
6089 static CORE_ADDR
6090 get_r_debug (const int pid, const int is_elf64)
6091 {
6092 CORE_ADDR dynamic_memaddr;
6093 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6094 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6095 CORE_ADDR map = -1;
6096
6097 dynamic_memaddr = get_dynamic (pid, is_elf64);
6098 if (dynamic_memaddr == 0)
6099 return map;
6100
6101 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6102 {
6103 if (is_elf64)
6104 {
6105 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6106 #ifdef DT_MIPS_RLD_MAP
6107 union
6108 {
6109 Elf64_Xword map;
6110 unsigned char buf[sizeof (Elf64_Xword)];
6111 }
6112 rld_map;
6113
6114 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6115 {
6116 if (linux_read_memory (dyn->d_un.d_val,
6117 rld_map.buf, sizeof (rld_map.buf)) == 0)
6118 return rld_map.map;
6119 else
6120 break;
6121 }
6122 #endif /* DT_MIPS_RLD_MAP */
6123
6124 if (dyn->d_tag == DT_DEBUG && map == -1)
6125 map = dyn->d_un.d_val;
6126
6127 if (dyn->d_tag == DT_NULL)
6128 break;
6129 }
6130 else
6131 {
6132 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6133 #ifdef DT_MIPS_RLD_MAP
6134 union
6135 {
6136 Elf32_Word map;
6137 unsigned char buf[sizeof (Elf32_Word)];
6138 }
6139 rld_map;
6140
6141 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6142 {
6143 if (linux_read_memory (dyn->d_un.d_val,
6144 rld_map.buf, sizeof (rld_map.buf)) == 0)
6145 return rld_map.map;
6146 else
6147 break;
6148 }
6149 #endif /* DT_MIPS_RLD_MAP */
6150
6151 if (dyn->d_tag == DT_DEBUG && map == -1)
6152 map = dyn->d_un.d_val;
6153
6154 if (dyn->d_tag == DT_NULL)
6155 break;
6156 }
6157
6158 dynamic_memaddr += dyn_size;
6159 }
6160
6161 return map;
6162 }
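
/* Example only: the same DT_DEBUG lookup done by a process on itself,
   using the linker-provided _DYNAMIC array instead of reading the
   inferior's memory:  */
#if 0
#include <elf.h>
#include <link.h>
#include <stddef.h>

extern ElfW(Dyn) _DYNAMIC[];

static struct r_debug *
example_find_r_debug (void)
{
  ElfW(Dyn) *dyn;

  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      return (struct r_debug *) dyn->d_un.d_ptr;

  return NULL;
}
#endif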
6163
6164 /* Read one pointer from MEMADDR in the inferior. */
6165
6166 static int
6167 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6168 {
6169 int ret;
6170
6171 /* Go through a union so this works on either big or little endian
6172 hosts, when the inferior's pointer size is smaller than the size
6173 of CORE_ADDR.  It is assumed that the inferior's endianness is the
6174 same as the superior's.  */
6175 union
6176 {
6177 CORE_ADDR core_addr;
6178 unsigned int ui;
6179 unsigned char uc;
6180 } addr;
6181
6182 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6183 if (ret == 0)
6184 {
6185 if (ptr_size == sizeof (CORE_ADDR))
6186 *ptr = addr.core_addr;
6187 else if (ptr_size == sizeof (unsigned int))
6188 *ptr = addr.ui;
6189 else
6190 gdb_assert_not_reached ("unhandled pointer size");
6191 }
6192 return ret;
6193 }
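
/* Illustration with hypothetical values: on a big-endian host where
   CORE_ADDR is 8 bytes and PTR_SIZE is 4, reading the target pointer
   0x00401000 places the bytes 00 40 10 00 at the start of the union;
   ADDR.UI (also 4 bytes at the union's start) then yields 0x00401000,
   whereas ADDR.CORE_ADDR would misread it as 0x0040100000000000
   (given zeroed trailing bytes).  */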
6194
6195 struct link_map_offsets
6196 {
6197 /* Offset and size of r_debug.r_version. */
6198 int r_version_offset;
6199
6200 /* Offset and size of r_debug.r_map. */
6201 int r_map_offset;
6202
6203 /* Offset to l_addr field in struct link_map. */
6204 int l_addr_offset;
6205
6206 /* Offset to l_name field in struct link_map. */
6207 int l_name_offset;
6208
6209 /* Offset to l_ld field in struct link_map. */
6210 int l_ld_offset;
6211
6212 /* Offset to l_next field in struct link_map. */
6213 int l_next_offset;
6214
6215 /* Offset to l_prev field in struct link_map. */
6216 int l_prev_offset;
6217 };
6218
6219 /* Construct qXfer:libraries-svr4:read reply. */
6220
6221 static int
6222 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6223 unsigned const char *writebuf,
6224 CORE_ADDR offset, int len)
6225 {
6226 char *document;
6227 unsigned document_len;
6228 struct process_info_private *const priv = current_process ()->priv;
6229 char filename[PATH_MAX];
6230 int pid, is_elf64;
6231
6232 static const struct link_map_offsets lmo_32bit_offsets =
6233 {
6234 0, /* r_version offset. */
6235 4, /* r_debug.r_map offset. */
6236 0, /* l_addr offset in link_map. */
6237 4, /* l_name offset in link_map. */
6238 8, /* l_ld offset in link_map. */
6239 12, /* l_next offset in link_map. */
6240 16 /* l_prev offset in link_map. */
6241 };
6242
6243 static const struct link_map_offsets lmo_64bit_offsets =
6244 {
6245 0, /* r_version offset. */
6246 8, /* r_debug.r_map offset. */
6247 0, /* l_addr offset in link_map. */
6248 8, /* l_name offset in link_map. */
6249 16, /* l_ld offset in link_map. */
6250 24, /* l_next offset in link_map. */
6251 32 /* l_prev offset in link_map. */
6252 };
6253 const struct link_map_offsets *lmo;
6254 unsigned int machine;
6255 int ptr_size;
6256 CORE_ADDR lm_addr = 0, lm_prev = 0;
6257 int allocated = 1024;
6258 char *p;
6259 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6260 int header_done = 0;
6261
6262 if (writebuf != NULL)
6263 return -2;
6264 if (readbuf == NULL)
6265 return -1;
6266
6267 pid = lwpid_of (current_thread);
6268 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6269 is_elf64 = elf_64_file_p (filename, &machine);
6270 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6271 ptr_size = is_elf64 ? 8 : 4;
6272
6273 while (annex[0] != '\0')
6274 {
6275 const char *sep;
6276 CORE_ADDR *addrp;
6277 int len;
6278
6279 sep = strchr (annex, '=');
6280 if (sep == NULL)
6281 break;
6282
6283 len = sep - annex;
6284 if (len == 5 && startswith (annex, "start"))
6285 addrp = &lm_addr;
6286 else if (len == 4 && startswith (annex, "prev"))
6287 addrp = &lm_prev;
6288 else
6289 {
6290 annex = strchr (sep, ';');
6291 if (annex == NULL)
6292 break;
6293 annex++;
6294 continue;
6295 }
6296
6297 annex = decode_address_to_semicolon (addrp, sep + 1);
6298 }
6299
6300 if (lm_addr == 0)
6301 {
6302 int r_version = 0;
6303
6304 if (priv->r_debug == 0)
6305 priv->r_debug = get_r_debug (pid, is_elf64);
6306
6307 /* We failed to find DT_DEBUG.  This situation will not change
6308 for this inferior, so do not retry.  Report it to GDB as
6309 E01; see solib-svr4.c on the GDB side for the reasons.  */
6310 if (priv->r_debug == (CORE_ADDR) -1)
6311 return -1;
6312
6313 if (priv->r_debug != 0)
6314 {
6315 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6316 (unsigned char *) &r_version,
6317 sizeof (r_version)) != 0
6318 || r_version != 1)
6319 {
6320 warning ("unexpected r_debug version %d", r_version);
6321 }
6322 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6323 &lm_addr, ptr_size) != 0)
6324 {
6325 warning ("unable to read r_map from 0x%lx",
6326 (long) priv->r_debug + lmo->r_map_offset);
6327 }
6328 }
6329 }
6330
6331 document = xmalloc (allocated);
6332 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6333 p = document + strlen (document);
6334
6335 while (lm_addr
6336 && read_one_ptr (lm_addr + lmo->l_name_offset,
6337 &l_name, ptr_size) == 0
6338 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6339 &l_addr, ptr_size) == 0
6340 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6341 &l_ld, ptr_size) == 0
6342 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6343 &l_prev, ptr_size) == 0
6344 && read_one_ptr (lm_addr + lmo->l_next_offset,
6345 &l_next, ptr_size) == 0)
6346 {
6347 unsigned char libname[PATH_MAX];
6348
6349 if (lm_prev != l_prev)
6350 {
6351 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6352 (long) lm_prev, (long) l_prev);
6353 break;
6354 }
6355
6356 /* Ignore the first entry even if it has a valid name, as it
6357 corresponds to the main executable.  The first entry should not be
6358 skipped if the dynamic loader was loaded late by a static executable
6359 (see the solib-svr4.c parameter ignore_first).  But in that case the
6360 main executable does not have PT_DYNAMIC present, and this function
6361 has already exited above because get_r_debug failed.  */
6362 if (lm_prev == 0)
6363 {
6364 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6365 p = p + strlen (p);
6366 }
6367 else
6368 {
6369 /* Not checking for error because reading may stop before
6370 we've got PATH_MAX worth of characters. */
6371 libname[0] = '\0';
6372 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6373 libname[sizeof (libname) - 1] = '\0';
6374 if (libname[0] != '\0')
6375 {
6376 /* 6x the size for xml_escape_text below. */
6377 size_t len = 6 * strlen ((char *) libname);
6378 char *name;
6379
6380 if (!header_done)
6381 {
6382 /* Terminate `<library-list-svr4'. */
6383 *p++ = '>';
6384 header_done = 1;
6385 }
6386
6387 while (allocated < p - document + len + 200)
6388 {
6389 /* Expand to guarantee sufficient storage. */
6390 uintptr_t document_len = p - document;
6391
6392 document = xrealloc (document, 2 * allocated);
6393 allocated *= 2;
6394 p = document + document_len;
6395 }
6396
6397 name = xml_escape_text ((char *) libname);
6398 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6399 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6400 name, (unsigned long) lm_addr,
6401 (unsigned long) l_addr, (unsigned long) l_ld);
6402 free (name);
6403 }
6404 }
6405
6406 lm_prev = lm_addr;
6407 lm_addr = l_next;
6408 }
6409
6410 if (!header_done)
6411 {
6412 /* Empty list; terminate `<library-list-svr4'. */
6413 strcpy (p, "/>");
6414 }
6415 else
6416 strcpy (p, "</library-list-svr4>");
6417
6418 document_len = strlen (document);
6419 if (offset < document_len)
6420 document_len -= offset;
6421 else
6422 document_len = 0;
6423 if (len > document_len)
6424 len = document_len;
6425
6426 memcpy (readbuf, document + offset, len);
6427 xfree (document);
6428
6429 return len;
6430 }
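
/* For illustration, a reply document built above has this shape
   (addresses hypothetical):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
              l_addr="0x7ffff7a0e000" l_ld="0x7ffff7dd5ba0"/>
     </library-list-svr4>  */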
6431
6432 #ifdef HAVE_LINUX_BTRACE
6433
6434 /* See to_enable_btrace target method. */
6435
6436 static struct btrace_target_info *
6437 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6438 {
6439 struct btrace_target_info *tinfo;
6440
6441 tinfo = linux_enable_btrace (ptid, conf);
6442
6443 if (tinfo != NULL && tinfo->ptr_bits == 0)
6444 {
6445 struct thread_info *thread = find_thread_ptid (ptid);
6446 struct regcache *regcache = get_thread_regcache (thread, 0);
6447
6448 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6449 }
6450
6451 return tinfo;
6452 }
6453
6454 /* See to_disable_btrace target method. */
6455
6456 static int
6457 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6458 {
6459 enum btrace_error err;
6460
6461 err = linux_disable_btrace (tinfo);
6462 return (err == BTRACE_ERR_NONE ? 0 : -1);
6463 }
6464
6465 /* Encode an Intel(R) Processor Trace configuration. */
6466
6467 static void
6468 linux_low_encode_pt_config (struct buffer *buffer,
6469 const struct btrace_data_pt_config *config)
6470 {
6471 buffer_grow_str (buffer, "<pt-config>\n");
6472
6473 switch (config->cpu.vendor)
6474 {
6475 case CV_INTEL:
6476 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6477 "model=\"%u\" stepping=\"%u\"/>\n",
6478 config->cpu.family, config->cpu.model,
6479 config->cpu.stepping);
6480 break;
6481
6482 default:
6483 break;
6484 }
6485
6486 buffer_grow_str (buffer, "</pt-config>\n");
6487 }
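
/* For illustration, on an Intel(R) processor this emits something
   like (family/model/stepping values hypothetical):

     <pt-config>
     <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
     </pt-config>  */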
6488
6489 /* Encode a raw buffer. */
6490
6491 static void
6492 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6493 unsigned int size)
6494 {
6495 if (size == 0)
6496 return;
6497
6498 /* We use hex encoding - see common/rsp-low.h. */
6499 buffer_grow_str (buffer, "<raw>\n");
6500
6501 while (size-- > 0)
6502 {
6503 char elem[2];
6504
6505 elem[0] = tohex ((*data >> 4) & 0xf);
6506 elem[1] = tohex (*data++ & 0xf);
6507
6508 buffer_grow (buffer, elem, 2);
6509 }
6510
6511 buffer_grow_str (buffer, "</raw>\n");
6512 }
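
/* Worked example: the byte 0xa5 is emitted as the two characters
   "a5", so a three-byte buffer { 0xde, 0xad, 0x01 } encodes as
   "dead01".  */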
6513
6514 /* See to_read_btrace target method. */
6515
6516 static int
6517 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6518 int type)
6519 {
6520 struct btrace_data btrace;
6521 struct btrace_block *block;
6522 enum btrace_error err;
6523 int i;
6524
6525 btrace_data_init (&btrace);
6526
6527 err = linux_read_btrace (&btrace, tinfo, type);
6528 if (err != BTRACE_ERR_NONE)
6529 {
6530 if (err == BTRACE_ERR_OVERFLOW)
6531 buffer_grow_str0 (buffer, "E.Overflow.");
6532 else
6533 buffer_grow_str0 (buffer, "E.Generic Error.");
6534
6535 goto err;
6536 }
6537
6538 switch (btrace.format)
6539 {
6540 case BTRACE_FORMAT_NONE:
6541 buffer_grow_str0 (buffer, "E.No Trace.");
6542 goto err;
6543
6544 case BTRACE_FORMAT_BTS:
6545 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6546 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6547
6548 for (i = 0;
6549 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6550 i++)
6551 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6552 paddress (block->begin), paddress (block->end));
6553
6554 buffer_grow_str0 (buffer, "</btrace>\n");
6555 break;
6556
6557 case BTRACE_FORMAT_PT:
6558 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6559 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6560 buffer_grow_str (buffer, "<pt>\n");
6561
6562 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6563
6564 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6565 btrace.variant.pt.size);
6566
6567 buffer_grow_str (buffer, "</pt>\n");
6568 buffer_grow_str0 (buffer, "</btrace>\n");
6569 break;
6570
6571 default:
6572 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6573 goto err;
6574 }
6575
6576 btrace_data_fini (&btrace);
6577 return 0;
6578
6579 err:
6580 btrace_data_fini (&btrace);
6581 return -1;
6582 }
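
/* For illustration, a BTS reply built above has this shape
   (block addresses hypothetical):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400520" end="0x400537"/>
     <block begin="0x400610" end="0x40062a"/>
     </btrace>  */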
6583
6584 /* See to_btrace_conf target method. */
6585
6586 static int
6587 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6588 struct buffer *buffer)
6589 {
6590 const struct btrace_config *conf;
6591
6592 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6593 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6594
6595 conf = linux_btrace_conf (tinfo);
6596 if (conf != NULL)
6597 {
6598 switch (conf->format)
6599 {
6600 case BTRACE_FORMAT_NONE:
6601 break;
6602
6603 case BTRACE_FORMAT_BTS:
6604 buffer_xml_printf (buffer, "<bts");
6605 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6606 buffer_xml_printf (buffer, " />\n");
6607 break;
6608
6609 case BTRACE_FORMAT_PT:
6610 buffer_xml_printf (buffer, "<pt");
6611 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6612 buffer_xml_printf (buffer, "/>\n");
6613 break;
6614 }
6615 }
6616
6617 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6618 return 0;
6619 }
6620 #endif /* HAVE_LINUX_BTRACE */
6621
6622 /* See nat/linux-nat.h. */
6623
6624 ptid_t
6625 current_lwp_ptid (void)
6626 {
6627 return ptid_of (current_thread);
6628 }
6629
6630 static struct target_ops linux_target_ops = {
6631 linux_create_inferior,
6632 linux_attach,
6633 linux_kill,
6634 linux_detach,
6635 linux_mourn,
6636 linux_join,
6637 linux_thread_alive,
6638 linux_resume,
6639 linux_wait,
6640 linux_fetch_registers,
6641 linux_store_registers,
6642 linux_prepare_to_access_memory,
6643 linux_done_accessing_memory,
6644 linux_read_memory,
6645 linux_write_memory,
6646 linux_look_up_symbols,
6647 linux_request_interrupt,
6648 linux_read_auxv,
6649 linux_supports_z_point_type,
6650 linux_insert_point,
6651 linux_remove_point,
6652 linux_stopped_by_sw_breakpoint,
6653 linux_supports_stopped_by_sw_breakpoint,
6654 linux_stopped_by_hw_breakpoint,
6655 linux_supports_stopped_by_hw_breakpoint,
6656 linux_supports_conditional_breakpoints,
6657 linux_stopped_by_watchpoint,
6658 linux_stopped_data_address,
6659 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6660 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6661 && defined(PT_TEXT_END_ADDR)
6662 linux_read_offsets,
6663 #else
6664 NULL,
6665 #endif
6666 #ifdef USE_THREAD_DB
6667 thread_db_get_tls_address,
6668 #else
6669 NULL,
6670 #endif
6671 linux_qxfer_spu,
6672 hostio_last_error_from_errno,
6673 linux_qxfer_osdata,
6674 linux_xfer_siginfo,
6675 linux_supports_non_stop,
6676 linux_async,
6677 linux_start_non_stop,
6678 linux_supports_multi_process,
6679 linux_supports_fork_events,
6680 linux_supports_vfork_events,
6681 linux_handle_new_gdb_connection,
6682 #ifdef USE_THREAD_DB
6683 thread_db_handle_monitor_command,
6684 #else
6685 NULL,
6686 #endif
6687 linux_common_core_of_thread,
6688 linux_read_loadmap,
6689 linux_process_qsupported,
6690 linux_supports_tracepoints,
6691 linux_read_pc,
6692 linux_write_pc,
6693 linux_thread_stopped,
6694 NULL,
6695 linux_pause_all,
6696 linux_unpause_all,
6697 linux_stabilize_threads,
6698 linux_install_fast_tracepoint_jump_pad,
6699 linux_emit_ops,
6700 linux_supports_disable_randomization,
6701 linux_get_min_fast_tracepoint_insn_len,
6702 linux_qxfer_libraries_svr4,
6703 linux_supports_agent,
6704 #ifdef HAVE_LINUX_BTRACE
6705 linux_supports_btrace,
6706 linux_low_enable_btrace,
6707 linux_low_disable_btrace,
6708 linux_low_read_btrace,
6709 linux_low_btrace_conf,
6710 #else
6711 NULL,
6712 NULL,
6713 NULL,
6714 NULL,
6715 NULL,
6716 #endif
6717 linux_supports_range_stepping,
6718 linux_proc_pid_to_exec_file,
6719 linux_mntns_open_cloexec,
6720 linux_mntns_unlink,
6721 linux_mntns_readlink,
6722 };
6723
6724 static void
6725 linux_init_signals ()
6726 {
6727 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6728 to find what the cancel signal actually is. */
6729 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6730 signal (__SIGRTMIN+1, SIG_IGN);
6731 #endif
6732 }
6733
6734 #ifdef HAVE_LINUX_REGSETS
6735 void
6736 initialize_regsets_info (struct regsets_info *info)
6737 {
6738 for (info->num_regsets = 0;
6739 info->regsets[info->num_regsets].size >= 0;
6740 info->num_regsets++)
6741 ;
6742 }
6743 #endif
6744
6745 void
6746 initialize_low (void)
6747 {
6748 struct sigaction sigchld_action;
6749 memset (&sigchld_action, 0, sizeof (sigchld_action));
6750 set_target_ops (&linux_target_ops);
6751 set_breakpoint_data (the_low_target.breakpoint,
6752 the_low_target.breakpoint_len);
6753 linux_init_signals ();
6754 linux_ptrace_init_warnings ();
6755
6756 sigchld_action.sa_handler = sigchld_handler;
6757 sigemptyset (&sigchld_action.sa_mask);
6758 sigchld_action.sa_flags = SA_RESTART;
6759 sigaction (SIGCHLD, &sigchld_action, NULL);
6760
6761 initialize_low_arch ();
6762
6763 linux_check_ptrace_features ();
6764 }