Change maybe_disable_address_space_randomization to a class
gdb/gdbserver/linux-low.c (binutils-gdb.git)
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "common-inferior.h"
#include "nat/fork-inferior.h"
#include "environ.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif
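
/* Editor's note (a hedged aside, not from the original): to our
   knowledge, SPUFS_MAGIC is the statfs f_type value that identifies a
   spufs mount (Cell/B.E. SPU contexts), consumed by spufs-related code
   further down in this file, outside this excerpt; the definition above
   is only a fallback for libcs whose headers lack it.  */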

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif
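
/* Editor's note (assumption flagged as such): ADDR_NO_RANDOMIZE is the
   personality(2) flag that disables address space layout randomization
   for a process; the fallback value above matches the kernel's.  It is
   used, via the helpers from nat/linux-personality.h included above,
   when starting inferiors with randomization disabled -- see
   linux_create_inferior below.  */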

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
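
/* Editor's note (a sketch of intent, assuming the usual mechanism):
   these PT_TEXT_ADDR/PT_DATA_ADDR/PT_TEXT_END_ADDR values are offsets
   into the ptrace "user area", read with PTRACE_PEEKUSER to locate the
   inferior's text and data segments on these (typically no-MMU)
   targets; the code consuming them lives later in this file, outside
   this excerpt.  */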

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
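
/* Editor's usage sketch (hypothetical values): when a stop is
   collected for a pid nobody knows yet, record it with

     add_to_pid_list (&stopped_pids, pid, status);

   and when a later fork/clone event claims that pid, retrieve it with

     if (pull_pid_from_list (&stopped_pids, pid, &status))
       ... the initial stop was already collected ...

   pull_pid_from_list unlinks and frees the entry, so each recorded
   stop is consumed at most once; see handle_extended_wait below for
   the in-tree consumer.  */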

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static int proceed_one_lwp (thread_info *thread, void *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
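
/* Editor's note (a sketch of the mechanism): the pipe is a wakeup
   token only.  When an event becomes pending, a byte is written to
   linux_event_pipe[1] (see async_file_mark, declared further down),
   which makes the read end readable and wakes the event loop; the loop
   then calls back into the target to fetch the actual event.  */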

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if the magic bytes don't match (not an ELF
   header at all).  On success store the ELF machine in *MACHINE;
   otherwise store EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF file
   (or too short to tell), and -1 if the file is not accessible,
   doesn't exist, or is not ELF at all.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
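
/* Editor's example (a sketch): callers deciding between 32- and 64-bit
   register layouts for process PID can do

     unsigned int machine;
     int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

   where a negative result means /proc/PID/exe could not be
   inspected.  */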

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, the child will
                 hit them, so uninsert single-step breakpoints from the
                 parent (and child).  Once the vfork child is done,
                 reinsert them back in the parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once the vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      thread_db_notice_clone (event_thr, ptid);

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
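
/* Editor's note restating the contract above: handle_extended_wait
   returns 0 when the event should be reported to the client (fork,
   vfork, vfork-done, exec) and 1 when it was consumed internally (a
   new clone/thread that is merely added to our lists).  */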

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}
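
/* Editor's note: per the comment above, linux_ptrace_fun runs in the
   forked child, between fork_inferior's fork and the exec of the
   inferior, so it restricts itself to PTRACE_TRACEME, setpgid and
   stdio fixups, and reports any failure via
   trace_start_error_with_name rather than returning an error code.  */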

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
                       const std::vector<char *> &program_args)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }
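
  /* Editor's note, inferred from this commit's subject ("Change
     maybe_disable_address_space_randomization to a class"): the object
     constructed above is an RAII guard whose constructor disables
     address space randomization (personality/ADDR_NO_RANDOMIZE) when
     DISABLE_RANDOMIZATION is set, and whose destructor restores the
     previous personality -- which is why fork_inferior is called
     inside this block.  */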

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
                                             &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (thread_info *thread, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (thread->id.pid () == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for find_inferior.  Detaches from non-leader threads of a
   given process.  */

static int
linux_detach_lwp_callback (thread_info *thread, void *args)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = *(int *) args;
  int lwpid = lwpid_of (thread);

  /* Skip other processes.  */
  if (thread->id.pid () != pid)
    return 0;

  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == lwpid)
    return 0;

  linux_detach_one_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (thread_info *thread, void *proc)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do
    {
      ret = my_waitpid (pid, &status, 0);
      if (WIFEXITED (status) || WIFSIGNALED (status))
        break;
    }
  while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (thread_info *thread, void *arg)
{
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (thread_info *thread, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (thread->id.lwp () == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
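
/* Editor's note: PTID may name a whole process here.  When its lwp
   field is zero, same_lwp above falls back to comparing against the
   pid, so find_lwp_pid (pid_to_ptid (pid)) yields the thread-group
   leader's LWP.  */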

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thread)
    {
      lwp_info *lwp = get_thread_lwp (thread);

      return callback (lwp, data);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
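
/* Editor's usage sketch (hypothetical callback): count the stopped
   LWPs of a process.

     static int
     count_stopped (struct lwp_info *lwp, void *data)
     {
       if (lwp_is_stopped (lwp))
         (*(int *) data)++;
       return 0;   -- zero means "keep iterating"
     }

     int n = 0;
     iterate_over_lwps (pid_to_ptid (pid), count_stopped, &n);

   A non-zero return from the callback ends the walk and makes
   iterate_over_lwps return that LWP.  */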
1897
1898 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1899 their exits until all other threads in the group have exited. */
1900
1901 static void
1902 check_zombie_leaders (void)
1903 {
1904 for_each_process ([] (process_info *proc) {
1905 pid_t leader_pid = pid_of (proc);
1906 struct lwp_info *leader_lp;
1907
1908 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1909
1910 if (debug_threads)
1911 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1912 "num_lwps=%d, zombie=%d\n",
1913 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1914 linux_proc_pid_is_zombie (leader_pid));
1915
1916 if (leader_lp != NULL && !leader_lp->stopped
1917 /* Check if there are other threads in the group, as we may
1918 have raced with the inferior simply exiting. */
1919 && !last_thread_of_process_p (leader_pid)
1920 && linux_proc_pid_is_zombie (leader_pid))
1921 {
1922 /* A leader zombie can mean one of two things:
1923
1924 - It exited, and there's an exit status pending and ready
1925 to collect, or only the leader exited (not the whole
1926 program). In the latter case, we can't waitpid the
1927 leader's exit status until all other threads are gone.
1928
1929 - There are 3 or more threads in the group, and a thread
1930 other than the leader exec'd. On an exec, the Linux
1931 kernel destroys all other threads (except the execing
1932 one) in the thread group, and resets the execing thread's
1933 tid to the tgid. No exit notification is sent for the
1934 execing thread -- from the ptracer's perspective, it
1935 appears as though the execing thread just vanishes.
1936 Until we reap all other threads except the leader and the
1937 execing thread, the leader will be zombie, and the
1938 execing thread will be in `D (disk sleep)'.  As soon as
1939 all other threads are reaped, the execing thread changes
1940 its tid to the tgid, and the previous (zombie) leader
1941 vanishes, giving place to the "new" leader. We could try
1942 distinguishing the exit and exec cases, by waiting once
1943 more, and seeing if something comes out, but it doesn't
1944 sound useful. The previous leader _does_ go away, and
1945 we'll re-add the new one once we see the exec event
1946 (which is just the same as what would happen if the
1947 previous leader did exit voluntarily before some other
1948 thread execs). */
1949
1950 if (debug_threads)
1951 debug_printf ("CZL: Thread group leader %d zombie "
1952 "(it exited, or another thread execd).\n",
1953 leader_pid);
1954
1955 delete_lwp (leader_lp);
1956 }
1957 });
1958 }
1959
1960 /* Callback for `find_inferior'. Returns the first LWP that is not
1961 stopped. ARG is a PTID filter. */
1962
1963 static int
1964 not_stopped_callback (thread_info *thread, void *arg)
1965 {
1966 struct lwp_info *lwp;
1967 ptid_t filter = *(ptid_t *) arg;
1968
1969 if (!ptid_match (ptid_of (thread), filter))
1970 return 0;
1971
1972 lwp = get_thread_lwp (thread);
1973 if (!lwp->stopped)
1974 return 1;
1975
1976 return 0;
1977 }
1978
1979 /* Increment LWP's suspend count. */
1980
1981 static void
1982 lwp_suspended_inc (struct lwp_info *lwp)
1983 {
1984 lwp->suspended++;
1985
1986 if (debug_threads && lwp->suspended > 4)
1987 {
1988 struct thread_info *thread = get_lwp_thread (lwp);
1989
1990 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1991 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1992 }
1993 }
1994
1995 /* Decrement LWP's suspend count. */
1996
1997 static void
1998 lwp_suspended_decr (struct lwp_info *lwp)
1999 {
2000 lwp->suspended--;
2001
2002 if (lwp->suspended < 0)
2003 {
2004 struct thread_info *thread = get_lwp_thread (lwp);
2005
2006 internal_error (__FILE__, __LINE__,
2007 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2008 lwp->suspended);
2009 }
2010 }
2011
2012 /* This function should only be called if the LWP got a SIGTRAP.
2013
2014 Handle any tracepoint steps or hits.  Return 1 if a tracepoint
2015 event was handled, 0 otherwise. */
2016
2017 static int
2018 handle_tracepoints (struct lwp_info *lwp)
2019 {
2020 struct thread_info *tinfo = get_lwp_thread (lwp);
2021 int tpoint_related_event = 0;
2022
2023 gdb_assert (lwp->suspended == 0);
2024
2025 /* If this tracepoint hit causes a tracing stop, we'll immediately
2026 uninsert tracepoints. To do this, we temporarily pause all
2027 threads, unpatch away, and then unpause threads. We need to make
2028 sure the unpausing doesn't resume LWP too. */
2029 lwp_suspended_inc (lwp);
2030
2031 /* And we need to be sure that any all-threads-stopping doesn't try
2032 to move threads out of the jump pads, as it could deadlock the
2033 inferior (LWP could be in the jump pad, maybe even holding the
2034 lock.)  */
2035
2036 /* Do any necessary step collect actions. */
2037 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2038
2039 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2040
2041 /* See if we just hit a tracepoint and do its main collect
2042 actions. */
2043 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2044
2045 lwp_suspended_decr (lwp);
2046
2047 gdb_assert (lwp->suspended == 0);
2048 gdb_assert (!stabilizing_threads
2049 || (lwp->collecting_fast_tracepoint
2050 != fast_tpoint_collect_result::not_collecting));
2051
2052 if (tpoint_related_event)
2053 {
2054 if (debug_threads)
2055 debug_printf ("got a tracepoint event\n");
2056 return 1;
2057 }
2058
2059 return 0;
2060 }
2061
2062 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2063 collection status. */
2064
2065 static fast_tpoint_collect_result
2066 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2067 struct fast_tpoint_collect_status *status)
2068 {
2069 CORE_ADDR thread_area;
2070 struct thread_info *thread = get_lwp_thread (lwp);
2071
2072 if (the_low_target.get_thread_area == NULL)
2073 return fast_tpoint_collect_result::not_collecting;
2074
2075 /* Get the thread area address. This is used to recognize which
2076 thread is which when tracing with the in-process agent library.
2077 We don't read anything from the address, and treat it as opaque;
2078 it's the address itself that we assume is unique per-thread. */
2079 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2080 return fast_tpoint_collect_result::not_collecting;
2081
2082 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2083 }
2084
2085 /* The reason we resume in the caller is that we want to be able
2086 to pass lwp->status_pending as WSTAT, and we need to clear
2087 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2088 refuses to resume. */
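/* Return 1 if LWP was found collecting in a fast tracepoint jump
   pad and still needs to be moved out of it before the pending
   event can be reported, 0 otherwise.  */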
2089
2090 static int
2091 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2092 {
2093 struct thread_info *saved_thread;
2094
2095 saved_thread = current_thread;
2096 current_thread = get_lwp_thread (lwp);
2097
2098 if ((wstat == NULL
2099 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2100 && supports_fast_tracepoints ()
2101 && agent_loaded_p ())
2102 {
2103 struct fast_tpoint_collect_status status;
2104
2105 if (debug_threads)
2106 debug_printf ("Checking whether LWP %ld needs to move out of the "
2107 "jump pad.\n",
2108 lwpid_of (current_thread));
2109
2110 fast_tpoint_collect_result r
2111 = linux_fast_tracepoint_collecting (lwp, &status);
2112
2113 if (wstat == NULL
2114 || (WSTOPSIG (*wstat) != SIGILL
2115 && WSTOPSIG (*wstat) != SIGFPE
2116 && WSTOPSIG (*wstat) != SIGSEGV
2117 && WSTOPSIG (*wstat) != SIGBUS))
2118 {
2119 lwp->collecting_fast_tracepoint = r;
2120
2121 if (r != fast_tpoint_collect_result::not_collecting)
2122 {
2123 if (r == fast_tpoint_collect_result::before_insn
2124 && lwp->exit_jump_pad_bkpt == NULL)
2125 {
2126 /* Haven't executed the original instruction yet.
2127 Set breakpoint there, and wait till it's hit,
2128 then single-step until exiting the jump pad. */
2129 lwp->exit_jump_pad_bkpt
2130 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2131 }
2132
2133 if (debug_threads)
2134 debug_printf ("Checking whether LWP %ld needs to move out of "
2135 "the jump pad...it does\n",
2136 lwpid_of (current_thread));
2137 current_thread = saved_thread;
2138
2139 return 1;
2140 }
2141 }
2142 else
2143 {
2144 /* If we get a synchronous signal while collecting, *and*
2145 while executing the (relocated) original instruction,
2146 reset the PC to point at the tpoint address, before
2147 reporting to GDB. Otherwise, it's an IPA lib bug: just
2148 report the signal to GDB, and pray for the best. */
2149
2150 lwp->collecting_fast_tracepoint
2151 = fast_tpoint_collect_result::not_collecting;
2152
2153 if (r != fast_tpoint_collect_result::not_collecting
2154 && (status.adjusted_insn_addr <= lwp->stop_pc
2155 && lwp->stop_pc < status.adjusted_insn_addr_end))
2156 {
2157 siginfo_t info;
2158 struct regcache *regcache;
2159
2160 /* The si_addr on a few signals references the address
2161 of the faulting instruction. Adjust that as
2162 well. */
2163 if ((WSTOPSIG (*wstat) == SIGILL
2164 || WSTOPSIG (*wstat) == SIGFPE
2165 || WSTOPSIG (*wstat) == SIGBUS
2166 || WSTOPSIG (*wstat) == SIGSEGV)
2167 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2168 (PTRACE_TYPE_ARG3) 0, &info) == 0
2169 /* Final check just to make sure we don't clobber
2170 the siginfo of non-kernel-sent signals. */
2171 && (uintptr_t) info.si_addr == lwp->stop_pc)
2172 {
2173 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2174 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2175 (PTRACE_TYPE_ARG3) 0, &info);
2176 }
2177
2178 regcache = get_thread_regcache (current_thread, 1);
2179 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2180 lwp->stop_pc = status.tpoint_addr;
2181
2182 /* Cancel any fast tracepoint lock this thread was
2183 holding. */
2184 force_unlock_trace_buffer ();
2185 }
2186
2187 if (lwp->exit_jump_pad_bkpt != NULL)
2188 {
2189 if (debug_threads)
2190 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2191 "stopping all threads momentarily.\n");
2192
2193 stop_all_lwps (1, lwp);
2194
2195 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2196 lwp->exit_jump_pad_bkpt = NULL;
2197
2198 unstop_all_lwps (1, lwp);
2199
2200 gdb_assert (lwp->suspended >= 0);
2201 }
2202 }
2203 }
2204
2205 if (debug_threads)
2206 debug_printf ("Checking whether LWP %ld needs to move out of the "
2207 "jump pad...no\n",
2208 lwpid_of (current_thread));
2209
2210 current_thread = saved_thread;
2211 return 0;
2212 }
2213
2214 /* Enqueue one signal in the "signals to report later when out of the
2215 jump pad" list. */
2216
2217 static void
2218 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2219 {
2220 struct pending_signals *p_sig;
2221 struct thread_info *thread = get_lwp_thread (lwp);
2222
2223 if (debug_threads)
2224 debug_printf ("Deferring signal %d for LWP %ld.\n",
2225 WSTOPSIG (*wstat), lwpid_of (thread));
2226
2227 if (debug_threads)
2228 {
2229 struct pending_signals *sig;
2230
2231 for (sig = lwp->pending_signals_to_report;
2232 sig != NULL;
2233 sig = sig->prev)
2234 debug_printf (" Already queued %d\n",
2235 sig->signal);
2236
2237 debug_printf (" (no more currently queued signals)\n");
2238 }
2239
2240 /* Don't enqueue non-RT signals if they are already in the deferred
2241 queue. (SIGSTOP being the easiest signal to see ending up here
2242 twice) */
2243 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2244 {
2245 struct pending_signals *sig;
2246
2247 for (sig = lwp->pending_signals_to_report;
2248 sig != NULL;
2249 sig = sig->prev)
2250 {
2251 if (sig->signal == WSTOPSIG (*wstat))
2252 {
2253 if (debug_threads)
2254 debug_printf ("Not requeuing already queued non-RT signal %d"
2255 " for LWP %ld\n",
2256 sig->signal,
2257 lwpid_of (thread));
2258 return;
2259 }
2260 }
2261 }
2262
2263 p_sig = XCNEW (struct pending_signals);
2264 p_sig->prev = lwp->pending_signals_to_report;
2265 p_sig->signal = WSTOPSIG (*wstat);
2266
2267 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2268 &p_sig->info);
2269
2270 lwp->pending_signals_to_report = p_sig;
2271 }
2272
2273 /* Dequeue one signal from the "signals to report later when out of
2274 the jump pad" list. */
2275
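/* Note that enqueue_one_deferred_signal links new entries in at the
   head of the list, so walking to the tail here pops the oldest
   entry first -- deferred signals are thus reported in FIFO order.  */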
2276 static int
2277 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2278 {
2279 struct thread_info *thread = get_lwp_thread (lwp);
2280
2281 if (lwp->pending_signals_to_report != NULL)
2282 {
2283 struct pending_signals **p_sig;
2284
2285 p_sig = &lwp->pending_signals_to_report;
2286 while ((*p_sig)->prev != NULL)
2287 p_sig = &(*p_sig)->prev;
2288
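/* Re-synthesize a stop status for this signal: W_STOPCODE (sig)
   typically expands to ((sig) << 8 | 0x7f), i.e. the status waitpid
   would have reported for a stop with signal SIG (gdb_wait.h
   provides a fallback definition).  */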
2289 *wstat = W_STOPCODE ((*p_sig)->signal);
2290 if ((*p_sig)->info.si_signo != 0)
2291 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2292 &(*p_sig)->info);
2293 free (*p_sig);
2294 *p_sig = NULL;
2295
2296 if (debug_threads)
2297 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2298 WSTOPSIG (*wstat), lwpid_of (thread));
2299
2300 if (debug_threads)
2301 {
2302 struct pending_signals *sig;
2303
2304 for (sig = lwp->pending_signals_to_report;
2305 sig != NULL;
2306 sig = sig->prev)
2307 debug_printf (" Still queued %d\n",
2308 sig->signal);
2309
2310 debug_printf (" (no more queued signals)\n");
2311 }
2312
2313 return 1;
2314 }
2315
2316 return 0;
2317 }
2318
2319 /* Fetch the possibly triggered data watchpoint info and store it in
2320 CHILD.
2321
2322 On some archs, like x86, that use debug registers to set
2323 watchpoints, it's possible that the way to know which watched
2324 address trapped is to check the register that is used to select
2325 which address to watch. Problem is, between setting the watchpoint
2326 and reading back which data address trapped, the user may change
2327 the set of watchpoints, and, as a consequence, GDB changes the
2328 debug registers in the inferior. To avoid reading back a stale
2329 stopped-data-address when that happens, we cache in CHILD the fact
2330 that a watchpoint trapped, and the corresponding data address, as
2331 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2332 registers meanwhile, we have the cached data we can rely on. */
2333
2334 static int
2335 check_stopped_by_watchpoint (struct lwp_info *child)
2336 {
2337 if (the_low_target.stopped_by_watchpoint != NULL)
2338 {
2339 struct thread_info *saved_thread;
2340
2341 saved_thread = current_thread;
2342 current_thread = get_lwp_thread (child);
2343
2344 if (the_low_target.stopped_by_watchpoint ())
2345 {
2346 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2347
2348 if (the_low_target.stopped_data_address != NULL)
2349 child->stopped_data_address
2350 = the_low_target.stopped_data_address ();
2351 else
2352 child->stopped_data_address = 0;
2353 }
2354
2355 current_thread = saved_thread;
2356 }
2357
2358 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2359 }
2360
2361 /* Return the ptrace options that we want to try to enable. */
2362
2363 static int
2364 linux_low_ptrace_options (int attached)
2365 {
2366 int options = 0;
2367
2368 if (!attached)
2369 options |= PTRACE_O_EXITKILL;
2370
2371 if (report_fork_events)
2372 options |= PTRACE_O_TRACEFORK;
2373
2374 if (report_vfork_events)
2375 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2376
2377 if (report_exec_events)
2378 options |= PTRACE_O_TRACEEXEC;
2379
2380 options |= PTRACE_O_TRACESYSGOOD;
2381
2382 return options;
2383 }
2384
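/* Note: these options are not applied immediately.  They are handed
   to linux_enable_event_reporting (which issues the actual
   PTRACE_SETOPTIONS call) the first time each LWP stops, via the
   must_set_ptrace_flags handling in linux_low_filter_event below.
   For example, a freshly spawned (non-attached) process with fork
   and exec reporting enabled would end up with PTRACE_O_EXITKILL
   | PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXEC | PTRACE_O_TRACESYSGOOD.  */
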
2385 /* Do low-level handling of the event, and check if we should go on
2386 and pass it to caller code. Return the affected lwp if we are, or
2387 NULL otherwise. */
2388
2389 static struct lwp_info *
2390 linux_low_filter_event (int lwpid, int wstat)
2391 {
2392 struct lwp_info *child;
2393 struct thread_info *thread;
2394 int have_stop_pc = 0;
2395
2396 child = find_lwp_pid (pid_to_ptid (lwpid));
2397
2398 /* Check for stop events reported by a process we didn't already
2399 know about - anything not already in our LWP list.
2400
2401 If we're expecting to receive stopped processes after
2402 fork, vfork, and clone events, then we'll just add the
2403 new one to our list and go back to waiting for the event
2404 to be reported - the stopped process might be returned
2405 from waitpid before or after the event is.
2406
2407 But note the case of a non-leader thread exec'ing after the
2408 leader having exited, and gone from our lists (because
2409 check_zombie_leaders deleted it). The non-leader thread
2410 changes its tid to the tgid. */
2411
2412 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2413 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2414 {
2415 ptid_t child_ptid;
2416
2417 /* A multi-thread exec after we had seen the leader exiting. */
2418 if (debug_threads)
2419 {
2420 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2421 "after exec.\n", lwpid);
2422 }
2423
2424 child_ptid = ptid_build (lwpid, lwpid, 0);
2425 child = add_lwp (child_ptid);
2426 child->stopped = 1;
2427 current_thread = child->thread;
2428 }
2429
2430 /* If we didn't find a process, one of two things presumably happened:
2431 - A process we started and then detached from has exited. Ignore it.
2432 - A process we are controlling has forked and the new child's stop
2433 was reported to us by the kernel. Save its PID. */
2434 if (child == NULL && WIFSTOPPED (wstat))
2435 {
2436 add_to_pid_list (&stopped_pids, lwpid, wstat);
2437 return NULL;
2438 }
2439 else if (child == NULL)
2440 return NULL;
2441
2442 thread = get_lwp_thread (child);
2443
2444 child->stopped = 1;
2445
2446 child->last_status = wstat;
2447
2448 /* Check if the thread has exited. */
2449 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2450 {
2451 if (debug_threads)
2452 debug_printf ("LLFE: %d exited.\n", lwpid);
2453
2454 if (finish_step_over (child))
2455 {
2456 /* Unsuspend all other LWPs, and set them back running again. */
2457 unsuspend_all_lwps (child);
2458 }
2459
2460 /* If there is at least one more LWP, then the exit signal was
2461 not the end of the debugged application and should be
2462 ignored, unless GDB wants to hear about thread exits. */
2463 if (report_thread_events
2464 || last_thread_of_process_p (pid_of (thread)))
2465 {
2466 /* Events are serialized to the GDB core, and we can't
2467 report this one right now.  Leave the status pending for
2468 the next time we're able to report it. */
2469 mark_lwp_dead (child, wstat);
2470 return child;
2471 }
2472 else
2473 {
2474 delete_lwp (child);
2475 return NULL;
2476 }
2477 }
2478
2479 gdb_assert (WIFSTOPPED (wstat));
2480
2481 if (WIFSTOPPED (wstat))
2482 {
2483 struct process_info *proc;
2484
2485 /* Architecture-specific setup after inferior is running. */
2486 proc = find_process_pid (pid_of (thread));
2487 if (proc->tdesc == NULL)
2488 {
2489 if (proc->attached)
2490 {
2491 /* This needs to happen after we have attached to the
2492 inferior and it is stopped for the first time, but
2493 before we access any inferior registers. */
2494 linux_arch_setup_thread (thread);
2495 }
2496 else
2497 {
2498 /* The process is started, but GDBserver will do
2499 architecture-specific setup after the program stops at
2500 the first instruction. */
2501 child->status_pending_p = 1;
2502 child->status_pending = wstat;
2503 return child;
2504 }
2505 }
2506 }
2507
2508 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2509 {
2510 struct process_info *proc = find_process_pid (pid_of (thread));
2511 int options = linux_low_ptrace_options (proc->attached);
2512
2513 linux_enable_event_reporting (lwpid, options);
2514 child->must_set_ptrace_flags = 0;
2515 }
2516
2517 /* Always update syscall_state, even if it will be filtered later. */
2518 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2519 {
2520 child->syscall_state
2521 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2522 ? TARGET_WAITKIND_SYSCALL_RETURN
2523 : TARGET_WAITKIND_SYSCALL_ENTRY);
2524 }
2525 else
2526 {
2527 /* Almost all other ptrace-stops are known to be outside of system
2528 calls, with further exceptions in handle_extended_wait. */
2529 child->syscall_state = TARGET_WAITKIND_IGNORE;
2530 }
2531
2532 /* Be careful not to overwrite stop_pc until save_stop_reason is
2533 called. */
2534 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2535 && linux_is_extended_waitstatus (wstat))
2536 {
2537 child->stop_pc = get_pc (child);
2538 if (handle_extended_wait (&child, wstat))
2539 {
2540 /* The event has been handled, so just return without
2541 reporting it. */
2542 return NULL;
2543 }
2544 }
2545
2546 if (linux_wstatus_maybe_breakpoint (wstat))
2547 {
2548 if (save_stop_reason (child))
2549 have_stop_pc = 1;
2550 }
2551
2552 if (!have_stop_pc)
2553 child->stop_pc = get_pc (child);
2554
2555 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2556 && child->stop_expected)
2557 {
2558 if (debug_threads)
2559 debug_printf ("Expected stop.\n");
2560 child->stop_expected = 0;
2561
2562 if (thread->last_resume_kind == resume_stop)
2563 {
2564 /* We want to report the stop to the core. Treat the
2565 SIGSTOP as a normal event. */
2566 if (debug_threads)
2567 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2568 target_pid_to_str (ptid_of (thread)));
2569 }
2570 else if (stopping_threads != NOT_STOPPING_THREADS)
2571 {
2572 /* Stopping threads. We don't want this SIGSTOP to end up
2573 pending. */
2574 if (debug_threads)
2575 debug_printf ("LLW: SIGSTOP caught for %s "
2576 "while stopping threads.\n",
2577 target_pid_to_str (ptid_of (thread)));
2578 return NULL;
2579 }
2580 else
2581 {
2582 /* This is a delayed SIGSTOP. Filter out the event. */
2583 if (debug_threads)
2584 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2585 child->stepping ? "step" : "continue",
2586 target_pid_to_str (ptid_of (thread)));
2587
2588 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2589 return NULL;
2590 }
2591 }
2592
2593 child->status_pending_p = 1;
2594 child->status_pending = wstat;
2595 return child;
2596 }
2597
2598 /* Return 1 if THREAD is doing hardware single step, 0 if software. */
2599
2600 static int
2601 maybe_hw_step (struct thread_info *thread)
2602 {
2603 if (can_hardware_single_step ())
2604 return 1;
2605 else
2606 {
2607 /* GDBserver must insert single-step breakpoint for software
2608 single step. */
2609 gdb_assert (has_single_step_breakpoints (thread));
2610 return 0;
2611 }
2612 }
2613
2614 /* Resume LWPs that are currently stopped without any pending status
2615 to report, but are resumed from the core's perspective. */
2616
2617 static void
2618 resume_stopped_resumed_lwps (thread_info *thread)
2619 {
2620 struct lwp_info *lp = get_thread_lwp (thread);
2621
2622 if (lp->stopped
2623 && !lp->suspended
2624 && !lp->status_pending_p
2625 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2626 {
2627 int step = 0;
2628
2629 if (thread->last_resume_kind == resume_step)
2630 step = maybe_hw_step (thread);
2631
2632 if (debug_threads)
2633 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2634 target_pid_to_str (ptid_of (thread)),
2635 paddress (lp->stop_pc),
2636 step);
2637
2638 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2639 }
2640 }
2641
2642 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2643 match FILTER_PTID (leaving others pending). The PTIDs can be:
2644 minus_one_ptid, to specify any child; a pid PTID, specifying all
2645 lwps of a thread group; or a PTID representing a single lwp. Store
2646 the stop status through the status pointer WSTAT. OPTIONS is
2647 passed to the waitpid call. Return 0 if no event was found and
2648 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2649 were found.  Return the PID of the stopped child otherwise. */
2650
2651 static int
2652 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2653 int *wstatp, int options)
2654 {
2655 struct thread_info *event_thread;
2656 struct lwp_info *event_child, *requested_child;
2657 sigset_t block_mask, prev_mask;
2658
2659 retry:
2660 /* N.B. event_thread points to the thread_info struct that contains
2661 event_child. Keep them in sync. */
2662 event_thread = NULL;
2663 event_child = NULL;
2664 requested_child = NULL;
2665
2666 /* Check for a lwp with a pending status. */
2667
2668 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2669 {
2670 event_thread = (struct thread_info *)
2671 find_inferior_in_random (&all_threads, status_pending_p_callback,
2672 &filter_ptid);
2673 if (event_thread != NULL)
2674 event_child = get_thread_lwp (event_thread);
2675 if (debug_threads && event_thread)
2676 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2677 }
2678 else if (!ptid_equal (filter_ptid, null_ptid))
2679 {
2680 requested_child = find_lwp_pid (filter_ptid);
2681
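/* If the requested child has a pending status but is midway
   through a fast tracepoint collection, defer the signal and let
   the collection finish first (compare linux_stabilize_threads).  */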
2682 if (stopping_threads == NOT_STOPPING_THREADS
2683 && requested_child->status_pending_p
2684 && (requested_child->collecting_fast_tracepoint
2685 != fast_tpoint_collect_result::not_collecting))
2686 {
2687 enqueue_one_deferred_signal (requested_child,
2688 &requested_child->status_pending);
2689 requested_child->status_pending_p = 0;
2690 requested_child->status_pending = 0;
2691 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2692 }
2693
2694 if (requested_child->suspended
2695 && requested_child->status_pending_p)
2696 {
2697 internal_error (__FILE__, __LINE__,
2698 "requesting an event out of a"
2699 " suspended child?");
2700 }
2701
2702 if (requested_child->status_pending_p)
2703 {
2704 event_child = requested_child;
2705 event_thread = get_lwp_thread (event_child);
2706 }
2707 }
2708
2709 if (event_child != NULL)
2710 {
2711 if (debug_threads)
2712 debug_printf ("Got an event from pending child %ld (%04x)\n",
2713 lwpid_of (event_thread), event_child->status_pending);
2714 *wstatp = event_child->status_pending;
2715 event_child->status_pending_p = 0;
2716 event_child->status_pending = 0;
2717 current_thread = event_thread;
2718 return lwpid_of (event_thread);
2719 }
2720
2721 /* But if we don't find a pending event, we'll have to wait.
2722
2723 We only enter this loop if no process has a pending wait status.
2724 Thus any action taken in response to a wait status inside this
2725 loop is responding as soon as we detect the status, not after any
2726 pending events. */
2727
2728 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2729 all signals while here. */
2730 sigfillset (&block_mask);
2731 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2732
2733 /* Always pull all events out of the kernel. We'll randomly select
2734 an event LWP out of all that have events, to prevent
2735 starvation. */
2736 while (event_child == NULL)
2737 {
2738 pid_t ret = 0;
2739
2740 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2741 quirks:
2742
2743 - If the thread group leader exits while other threads in the
2744 thread group still exist, waitpid(TGID, ...) hangs. That
2745 waitpid won't return an exit status until the other threads
2746 in the group are reaped.
2747
2748 - When a non-leader thread execs, that thread just vanishes
2749 without reporting an exit (so we'd hang if we waited for it
2750 explicitly in that case). The exec event is reported to
2751 the TGID pid. */
2752 errno = 0;
2753 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2754
2755 if (debug_threads)
2756 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2757 ret, errno ? strerror (errno) : "ERRNO-OK");
2758
2759 if (ret > 0)
2760 {
2761 if (debug_threads)
2762 {
2763 debug_printf ("LLW: waitpid %ld received %s\n",
2764 (long) ret, status_to_str (*wstatp));
2765 }
2766
2767 /* Filter all events. IOW, leave all events pending. We'll
2768 randomly select an event LWP out of all that have events
2769 below. */
2770 linux_low_filter_event (ret, *wstatp);
2771 /* Retry until nothing comes out of waitpid. A single
2772 SIGCHLD can indicate more than one child stopped. */
2773 continue;
2774 }
2775
2776 /* Now that we've pulled all events out of the kernel, resume
2777 LWPs that don't have an interesting event to report. */
2778 if (stopping_threads == NOT_STOPPING_THREADS)
2779 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2780
2781 /* ... and find an LWP with a status to report to the core, if
2782 any. */
2783 event_thread = (struct thread_info *)
2784 find_inferior_in_random (&all_threads, status_pending_p_callback,
2785 &filter_ptid);
2786 if (event_thread != NULL)
2787 {
2788 event_child = get_thread_lwp (event_thread);
2789 *wstatp = event_child->status_pending;
2790 event_child->status_pending_p = 0;
2791 event_child->status_pending = 0;
2792 break;
2793 }
2794
2795 /* Check for zombie thread group leaders. Those can't be reaped
2796 until all other threads in the thread group are. */
2797 check_zombie_leaders ();
2798
2799 /* If there are no resumed children left in the set of LWPs we
2800 want to wait for, bail. We can't just block in
2801 waitpid/sigsuspend, because lwps might have been left stopped
2802 in trace-stop state, and we'd be stuck forever waiting for
2803 their status to change (which would only happen if we resumed
2804 them). Even if WNOHANG is set, this return code is preferred
2805 over 0 (below), as it is more detailed. */
2806 if ((find_inferior (&all_threads,
2807 not_stopped_callback,
2808 &wait_ptid) == NULL))
2809 {
2810 if (debug_threads)
2811 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2812 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2813 return -1;
2814 }
2815
2816 /* No interesting event to report to the caller. */
2817 if ((options & WNOHANG))
2818 {
2819 if (debug_threads)
2820 debug_printf ("WNOHANG set, no event found\n");
2821
2822 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2823 return 0;
2824 }
2825
2826 /* Block until we get an event reported with SIGCHLD. */
2827 if (debug_threads)
2828 debug_printf ("sigsuspend'ing\n");
2829
2830 sigsuspend (&prev_mask);
2831 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2832 goto retry;
2833 }
2834
2835 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2836
2837 current_thread = event_thread;
2838
2839 return lwpid_of (event_thread);
2840 }
2841
2842 /* Wait for an event from child(ren) PTID. PTIDs can be:
2843 minus_one_ptid, to specify any child; a pid PTID, specifying all
2844 lwps of a thread group; or a PTID representing a single lwp. Store
2845 the stop status through the status pointer WSTAT. OPTIONS is
2846 passed to the waitpid call. Return 0 if no event was found and
2847 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2848 were found.  Return the PID of the stopped child otherwise. */
2849
2850 static int
2851 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2852 {
2853 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2854 }
2855
2856 /* Count the LWPs that have had events. */
2857
2858 static int
2859 count_events_callback (thread_info *thread, void *data)
2860 {
2861 struct lwp_info *lp = get_thread_lwp (thread);
2862 int *count = (int *) data;
2863
2864 gdb_assert (count != NULL);
2865
2866 /* Count only resumed LWPs that have an event pending. */
2867 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2868 && lp->status_pending_p)
2869 (*count)++;
2870
2871 return 0;
2872 }
2873
2874 /* Select the LWP (if any) that is currently being single-stepped. */
2875
2876 static int
2877 select_singlestep_lwp_callback (thread_info *thread, void *data)
2878 {
2879 struct lwp_info *lp = get_thread_lwp (thread);
2880
2881 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2882 && thread->last_resume_kind == resume_step
2883 && lp->status_pending_p)
2884 return 1;
2885 else
2886 return 0;
2887 }
2888
2889 /* Select the Nth LWP that has had an event. */
2890
2891 static int
2892 select_event_lwp_callback (thread_info *thread, void *data)
2893 {
2894 struct lwp_info *lp = get_thread_lwp (thread);
2895 int *selector = (int *) data;
2896
2897 gdb_assert (selector != NULL);
2898
2899 /* Select only resumed LWPs that have an event pending. */
2900 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2901 && lp->status_pending_p)
2902 if ((*selector)-- == 0)
2903 return 1;
2904
2905 return 0;
2906 }
2907
2908 /* Select one LWP out of those that have events pending. */
2909
2910 static void
2911 select_event_lwp (struct lwp_info **orig_lp)
2912 {
2913 int num_events = 0;
2914 int random_selector;
2915 struct thread_info *event_thread = NULL;
2916
2917 /* In all-stop, give preference to the LWP that is being
2918 single-stepped. There will be at most one, and it's the LWP that
2919 the core is most interested in. If we didn't do this, then we'd
2920 have to handle pending step SIGTRAPs somehow in case the core
2921 later continues the previously-stepped thread, otherwise we'd
2922 report the pending SIGTRAP, and the core, not having stepped the
2923 thread, wouldn't understand what the trap was for, and therefore
2924 would report it to the user as a random signal. */
2925 if (!non_stop)
2926 {
2927 event_thread
2928 = (struct thread_info *) find_inferior (&all_threads,
2929 select_singlestep_lwp_callback,
2930 NULL);
2931 if (event_thread != NULL)
2932 {
2933 if (debug_threads)
2934 debug_printf ("SEL: Select single-step %s\n",
2935 target_pid_to_str (ptid_of (event_thread)));
2936 }
2937 }
2938 if (event_thread == NULL)
2939 {
2940 /* No single-stepping LWP. Select one at random, out of those
2941 which have had events. */
2942
2943 /* First see how many events we have. */
2944 find_inferior (&all_threads, count_events_callback, &num_events);
2945 gdb_assert (num_events > 0);
2946
2947 /* Now randomly pick a LWP out of those that have had
2948 events. */
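/* Scale rand () into the range [0, num_events).  Using floating
   point here is the classic idiom that avoids the slight bias of
   rand () % num_events.  */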
2949 random_selector = (int)
2950 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2951
2952 if (debug_threads && num_events > 1)
2953 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2954 num_events, random_selector);
2955
2956 event_thread
2957 = (struct thread_info *) find_inferior (&all_threads,
2958 select_event_lwp_callback,
2959 &random_selector);
2960 }
2961
2962 if (event_thread != NULL)
2963 {
2964 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2965
2966 /* Switch the event LWP. */
2967 *orig_lp = event_lp;
2968 }
2969 }
2970
2971 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2972 non-NULL. */
2973
2974 static void
2975 unsuspend_all_lwps (struct lwp_info *except)
2976 {
2977 for_each_thread ([&] (thread_info *thread)
2978 {
2979 lwp_info *lwp = get_thread_lwp (thread);
2980
2981 if (lwp != except)
2982 lwp_suspended_decr (lwp);
2983 });
2984 }
2985
2986 static void move_out_of_jump_pad_callback (thread_info *thread);
2987 static bool stuck_in_jump_pad_callback (thread_info *thread);
2988 static int lwp_running (thread_info *thread, void *data);
2989 static ptid_t linux_wait_1 (ptid_t ptid,
2990 struct target_waitstatus *ourstatus,
2991 int target_options);
2992
2993 /* Stabilize threads (move out of jump pads).
2994
2995 If a thread is midway collecting a fast tracepoint, we need to
2996 finish the collection and move it out of the jump pad before
2997 reporting the signal.
2998
2999 This avoids recursion while collecting (when a signal arrives
3000 midway, and the signal handler itself collects), which would trash
3001 the trace buffer. In case the user set a breakpoint in a signal
3002 handler, this avoids the backtrace showing the jump pad, etc..
3003 Most importantly, there are certain things we can't do safely if
3004 threads are stopped in a jump pad (or in its callee's). For
3005 example:
3006
3007 - starting a new trace run.  A thread still collecting the
3008 previous run could trash the trace buffer when resumed.  The trace
3009 buffer control structures would have been reset but the thread had
3010 no way to tell.  The thread could even be midway through memcpy'ing
3011 to the buffer, which would mean that when resumed, it would clobber
3012 the trace buffer that had been set up for the new run.
3013
3014 - we can't rewrite/reuse the jump pads for new tracepoints
3015 safely.  Say you do tstart while a thread is stopped midway
3016 through collecting.  When the thread is later resumed, it finishes the
3017 collection, and returns to the jump pad, to execute the original
3018 instruction that was under the tracepoint jump at the time the
3019 older run had been started.  If the jump pad had since been
3020 rewritten for something else in the new run, the thread would now
3021 execute wrong or random instructions. */
3022
3023 static void
3024 linux_stabilize_threads (void)
3025 {
3026 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
3027
3028 if (thread_stuck != NULL)
3029 {
3030 if (debug_threads)
3031 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3032 lwpid_of (thread_stuck));
3033 return;
3034 }
3035
3036 thread_info *saved_thread = current_thread;
3037
3038 stabilizing_threads = 1;
3039
3040 /* Kick 'em all. */
3041 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3042
3043 /* Loop until all are stopped out of the jump pads. */
3044 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3045 {
3046 struct target_waitstatus ourstatus;
3047 struct lwp_info *lwp;
3048 int wstat;
3049
3050 /* Note that we go through the full wait event loop.  While
3051 moving threads out of the jump pad, we need to be able to step
3052 over internal breakpoints and such. */
3053 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3054
3055 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3056 {
3057 lwp = get_thread_lwp (current_thread);
3058
3059 /* Lock it. */
3060 lwp_suspended_inc (lwp);
3061
3062 if (ourstatus.value.sig != GDB_SIGNAL_0
3063 || current_thread->last_resume_kind == resume_stop)
3064 {
3065 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3066 enqueue_one_deferred_signal (lwp, &wstat);
3067 }
3068 }
3069 }
3070
3071 unsuspend_all_lwps (NULL);
3072
3073 stabilizing_threads = 0;
3074
3075 current_thread = saved_thread;
3076
3077 if (debug_threads)
3078 {
3079 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3080
3081 if (thread_stuck != NULL)
3082 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3083 lwpid_of (thread_stuck));
3084 }
3085 }
3086
3087 /* Convenience function that is called when the kernel reports an
3088 event that is not passed out to GDB. */
3089
3090 static ptid_t
3091 ignore_event (struct target_waitstatus *ourstatus)
3092 {
3093 /* If we got an event, there may still be others, as a single
3094 SIGCHLD can indicate more than one child stopped. This forces
3095 another target_wait call. */
3096 async_file_mark ();
3097
3098 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3099 return null_ptid;
3100 }
3101
3102 /* Convenience function that is called when the kernel reports an exit
3103 event. This decides whether to report the event to GDB as a
3104 process exit event, a thread exit event, or to suppress the
3105 event. */
3106
3107 static ptid_t
3108 filter_exit_event (struct lwp_info *event_child,
3109 struct target_waitstatus *ourstatus)
3110 {
3111 struct thread_info *thread = get_lwp_thread (event_child);
3112 ptid_t ptid = ptid_of (thread);
3113
3114 if (!last_thread_of_process_p (pid_of (thread)))
3115 {
3116 if (report_thread_events)
3117 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3118 else
3119 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3120
3121 delete_lwp (event_child);
3122 }
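/* This was the last thread of the process; leave OURSTATUS as the
   process-wide exit event already filled in by the caller.  */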
3123 return ptid;
3124 }
3125
3126 /* Returns 1 if GDB is interested in any event_child syscalls. */
3127
3128 static int
3129 gdb_catching_syscalls_p (struct lwp_info *event_child)
3130 {
3131 struct thread_info *thread = get_lwp_thread (event_child);
3132 struct process_info *proc = get_thread_process (thread);
3133
3134 return !proc->syscalls_to_catch.empty ();
3135 }
3136
3137 /* Returns 1 if GDB is interested in the event_child syscall.
3138 Only to be called when the stop signal is SYSCALL_SIGTRAP. */
3139
3140 static int
3141 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3142 {
3143 int sysno;
3144 struct thread_info *thread = get_lwp_thread (event_child);
3145 struct process_info *proc = get_thread_process (thread);
3146
3147 if (proc->syscalls_to_catch.empty ())
3148 return 0;
3149
3150 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3151 return 1;
3152
3153 get_syscall_trapinfo (event_child, &sysno);
3154
3155 for (int iter : proc->syscalls_to_catch)
3156 if (iter == sysno)
3157 return 1;
3158
3159 return 0;
3160 }
3161
3162 /* Wait for process, returns status. */
3163
3164 static ptid_t
3165 linux_wait_1 (ptid_t ptid,
3166 struct target_waitstatus *ourstatus, int target_options)
3167 {
3168 int w;
3169 struct lwp_info *event_child;
3170 int options;
3171 int pid;
3172 int step_over_finished;
3173 int bp_explains_trap;
3174 int maybe_internal_trap;
3175 int report_to_gdb;
3176 int trace_event;
3177 int in_step_range;
3178 int any_resumed;
3179
3180 if (debug_threads)
3181 {
3182 debug_enter ();
3183 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3184 }
3185
3186 /* Translate generic target options into linux options. */
3187 options = __WALL;
3188 if (target_options & TARGET_WNOHANG)
3189 options |= WNOHANG;
3190
3191 bp_explains_trap = 0;
3192 trace_event = 0;
3193 in_step_range = 0;
3194 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3195
3196 /* Find a resumed LWP, if any. */
3197 if (find_inferior (&all_threads,
3198 status_pending_p_callback,
3199 &minus_one_ptid) != NULL)
3200 any_resumed = 1;
3201 else if ((find_inferior (&all_threads,
3202 not_stopped_callback,
3203 &minus_one_ptid) != NULL))
3204 any_resumed = 1;
3205 else
3206 any_resumed = 0;
3207
3208 if (ptid_equal (step_over_bkpt, null_ptid))
3209 pid = linux_wait_for_event (ptid, &w, options);
3210 else
3211 {
3212 if (debug_threads)
3213 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3214 target_pid_to_str (step_over_bkpt));
3215 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3216 }
3217
3218 if (pid == 0 || (pid == -1 && !any_resumed))
3219 {
3220 gdb_assert (target_options & TARGET_WNOHANG);
3221
3222 if (debug_threads)
3223 {
3224 debug_printf ("linux_wait_1 ret = null_ptid, "
3225 "TARGET_WAITKIND_IGNORE\n");
3226 debug_exit ();
3227 }
3228
3229 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3230 return null_ptid;
3231 }
3232 else if (pid == -1)
3233 {
3234 if (debug_threads)
3235 {
3236 debug_printf ("linux_wait_1 ret = null_ptid, "
3237 "TARGET_WAITKIND_NO_RESUMED\n");
3238 debug_exit ();
3239 }
3240
3241 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3242 return null_ptid;
3243 }
3244
3245 event_child = get_thread_lwp (current_thread);
3246
3247 /* linux_wait_for_event only returns an exit status for the last
3248 child of a process. Report it. */
3249 if (WIFEXITED (w) || WIFSIGNALED (w))
3250 {
3251 if (WIFEXITED (w))
3252 {
3253 ourstatus->kind = TARGET_WAITKIND_EXITED;
3254 ourstatus->value.integer = WEXITSTATUS (w);
3255
3256 if (debug_threads)
3257 {
3258 debug_printf ("linux_wait_1 ret = %s, exited with "
3259 "retcode %d\n",
3260 target_pid_to_str (ptid_of (current_thread)),
3261 WEXITSTATUS (w));
3262 debug_exit ();
3263 }
3264 }
3265 else
3266 {
3267 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3268 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3269
3270 if (debug_threads)
3271 {
3272 debug_printf ("linux_wait_1 ret = %s, terminated with "
3273 "signal %d\n",
3274 target_pid_to_str (ptid_of (current_thread)),
3275 WTERMSIG (w));
3276 debug_exit ();
3277 }
3278 }
3279
3280 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3281 return filter_exit_event (event_child, ourstatus);
3282
3283 return ptid_of (current_thread);
3284 }
3285
3286 /* If step-over executes a breakpoint instruction, in the case of a
3287 hardware single step it means a gdb/gdbserver breakpoint had been
3288 planted on top of a permanent breakpoint, in the case of a software
3289 single step it may just mean that gdbserver hit the reinsert breakpoint.
3290 The PC has been adjusted by save_stop_reason to point at
3291 the breakpoint address.
3292 So in the case of hardware single step, advance the PC manually past
3293 the breakpoint, and in the case of software single step, advance only
3294 if it's not the single_step_breakpoint we are hitting.
3295 This prevents the program from trapping on a permanent breakpoint
3296 forever. */
3297 if (!ptid_equal (step_over_bkpt, null_ptid)
3298 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3299 && (event_child->stepping
3300 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3301 {
3302 int increment_pc = 0;
3303 int breakpoint_kind = 0;
3304 CORE_ADDR stop_pc = event_child->stop_pc;
3305
3306 breakpoint_kind
3307 = the_target->breakpoint_kind_from_current_state (&stop_pc);
3308 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3309
3310 if (debug_threads)
3311 {
3312 debug_printf ("step-over for %s executed software breakpoint\n",
3313 target_pid_to_str (ptid_of (current_thread)));
3314 }
3315
3316 if (increment_pc != 0)
3317 {
3318 struct regcache *regcache
3319 = get_thread_regcache (current_thread, 1);
3320
3321 event_child->stop_pc += increment_pc;
3322 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3323
3324 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3325 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3326 }
3327 }
3328
3329 /* If this event was not handled before, and is not a SIGTRAP, we
3330 report it. SIGILL and SIGSEGV are also treated as traps in case
3331 a breakpoint is inserted at the current PC. If this target does
3332 not support internal breakpoints at all, we also report the
3333 SIGTRAP without further processing; it's of no concern to us. */
3334 maybe_internal_trap
3335 = (supports_breakpoints ()
3336 && (WSTOPSIG (w) == SIGTRAP
3337 || ((WSTOPSIG (w) == SIGILL
3338 || WSTOPSIG (w) == SIGSEGV)
3339 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3340
3341 if (maybe_internal_trap)
3342 {
3343 /* Handle anything that requires bookkeeping before deciding to
3344 report the event or continue waiting. */
3345
3346 /* First check if we can explain the SIGTRAP with an internal
3347 breakpoint, or if we should possibly report the event to GDB.
3348 Do this before anything that may remove or insert a
3349 breakpoint. */
3350 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3351
3352 /* We have a SIGTRAP, possibly a step-over dance has just
3353 finished. If so, tweak the state machine accordingly,
3354 reinsert breakpoints and delete any single-step
3355 breakpoints. */
3356 step_over_finished = finish_step_over (event_child);
3357
3358 /* Now invoke the callbacks of any internal breakpoints there. */
3359 check_breakpoints (event_child->stop_pc);
3360
3361 /* Handle tracepoint data collecting. This may overflow the
3362 trace buffer, and cause a tracing stop, removing
3363 breakpoints. */
3364 trace_event = handle_tracepoints (event_child);
3365
3366 if (bp_explains_trap)
3367 {
3368 if (debug_threads)
3369 debug_printf ("Hit a gdbserver breakpoint.\n");
3370 }
3371 }
3372 else
3373 {
3374 /* We have some other signal, possibly a step-over dance was in
3375 progress, and it should be cancelled too. */
3376 step_over_finished = finish_step_over (event_child);
3377 }
3378
3379 /* We have all the data we need. Either report the event to GDB, or
3380 resume threads and keep waiting for more. */
3381
3382 /* If we're collecting a fast tracepoint, finish the collection and
3383 move out of the jump pad before delivering a signal. See
3384 linux_stabilize_threads. */
3385
3386 if (WIFSTOPPED (w)
3387 && WSTOPSIG (w) != SIGTRAP
3388 && supports_fast_tracepoints ()
3389 && agent_loaded_p ())
3390 {
3391 if (debug_threads)
3392 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3393 "to defer or adjust it.\n",
3394 WSTOPSIG (w), lwpid_of (current_thread));
3395
3396 /* Allow debugging the jump pad itself. */
3397 if (current_thread->last_resume_kind != resume_step
3398 && maybe_move_out_of_jump_pad (event_child, &w))
3399 {
3400 enqueue_one_deferred_signal (event_child, &w);
3401
3402 if (debug_threads)
3403 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3404 WSTOPSIG (w), lwpid_of (current_thread));
3405
3406 linux_resume_one_lwp (event_child, 0, 0, NULL);
3407
3408 if (debug_threads)
3409 debug_exit ();
3410 return ignore_event (ourstatus);
3411 }
3412 }
3413
3414 if (event_child->collecting_fast_tracepoint
3415 != fast_tpoint_collect_result::not_collecting)
3416 {
3417 if (debug_threads)
3418 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3419 "Check if we're already there.\n",
3420 lwpid_of (current_thread),
3421 (int) event_child->collecting_fast_tracepoint);
3422
3423 trace_event = 1;
3424
3425 event_child->collecting_fast_tracepoint
3426 = linux_fast_tracepoint_collecting (event_child, NULL);
3427
3428 if (event_child->collecting_fast_tracepoint
3429 != fast_tpoint_collect_result::before_insn)
3430 {
3431 /* No longer need this breakpoint. */
3432 if (event_child->exit_jump_pad_bkpt != NULL)
3433 {
3434 if (debug_threads)
3435 debug_printf ("No longer need exit-jump-pad bkpt; removing it.  "
3436 "Stopping all threads momentarily.\n");
3437
3438 /* Other running threads could hit this breakpoint.
3439 We don't handle moribund locations like GDB does,
3440 instead we always pause all threads when removing
3441 breakpoints, so that any step-over or
3442 decr_pc_after_break adjustment is always taken
3443 care of while the breakpoint is still
3444 inserted. */
3445 stop_all_lwps (1, event_child);
3446
3447 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3448 event_child->exit_jump_pad_bkpt = NULL;
3449
3450 unstop_all_lwps (1, event_child);
3451
3452 gdb_assert (event_child->suspended >= 0);
3453 }
3454 }
3455
3456 if (event_child->collecting_fast_tracepoint
3457 == fast_tpoint_collect_result::not_collecting)
3458 {
3459 if (debug_threads)
3460 debug_printf ("fast tracepoint finished "
3461 "collecting successfully.\n");
3462
3463 /* We may have a deferred signal to report. */
3464 if (dequeue_one_deferred_signal (event_child, &w))
3465 {
3466 if (debug_threads)
3467 debug_printf ("dequeued one signal.\n");
3468 }
3469 else
3470 {
3471 if (debug_threads)
3472 debug_printf ("no deferred signals.\n");
3473
3474 if (stabilizing_threads)
3475 {
3476 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3477 ourstatus->value.sig = GDB_SIGNAL_0;
3478
3479 if (debug_threads)
3480 {
3481 debug_printf ("linux_wait_1 ret = %s, stopped "
3482 "while stabilizing threads\n",
3483 target_pid_to_str (ptid_of (current_thread)));
3484 debug_exit ();
3485 }
3486
3487 return ptid_of (current_thread);
3488 }
3489 }
3490 }
3491 }
3492
3493 /* Check whether GDB would be interested in this event. */
3494
3495 /* Check if GDB is interested in this syscall. */
3496 if (WIFSTOPPED (w)
3497 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3498 && !gdb_catch_this_syscall_p (event_child))
3499 {
3500 if (debug_threads)
3501 {
3502 debug_printf ("Ignored syscall for LWP %ld.\n",
3503 lwpid_of (current_thread));
3504 }
3505
3506 linux_resume_one_lwp (event_child, event_child->stepping,
3507 0, NULL);
3508
3509 if (debug_threads)
3510 debug_exit ();
3511 return ignore_event (ourstatus);
3512 }
3513
3514 /* If GDB is not interested in this signal, don't stop other
3515 threads, and don't report it to GDB. Just resume the inferior
3516 right away. We do this for threading-related signals as well as
3517 any that GDB specifically requested we ignore. But never ignore
3518 SIGSTOP if we sent it ourselves, and do not ignore signals when
3519 stepping - they may require special handling to skip the signal
3520 handler. Also never ignore signals that could be caused by a
3521 breakpoint. */
3522 if (WIFSTOPPED (w)
3523 && current_thread->last_resume_kind != resume_step
3524 && (
3525 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3526 (current_process ()->priv->thread_db != NULL
3527 && (WSTOPSIG (w) == __SIGRTMIN
3528 || WSTOPSIG (w) == __SIGRTMIN + 1))
3529 ||
3530 #endif
3531 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3532 && !(WSTOPSIG (w) == SIGSTOP
3533 && current_thread->last_resume_kind == resume_stop)
3534 && !linux_wstatus_maybe_breakpoint (w))))
3535 {
3536 siginfo_t info, *info_p;
3537
3538 if (debug_threads)
3539 debug_printf ("Ignored signal %d for LWP %ld.\n",
3540 WSTOPSIG (w), lwpid_of (current_thread));
3541
3542 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3543 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3544 info_p = &info;
3545 else
3546 info_p = NULL;
3547
3548 if (step_over_finished)
3549 {
3550 /* We cancelled this thread's step-over above. We still
3551 need to unsuspend all other LWPs, and set them back
3552 running again while the signal handler runs. */
3553 unsuspend_all_lwps (event_child);
3554
3555 /* Enqueue the pending signal info so that proceed_all_lwps
3556 doesn't lose it. */
3557 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3558
3559 proceed_all_lwps ();
3560 }
3561 else
3562 {
3563 linux_resume_one_lwp (event_child, event_child->stepping,
3564 WSTOPSIG (w), info_p);
3565 }
3566
3567 if (debug_threads)
3568 debug_exit ();
3569
3570 return ignore_event (ourstatus);
3571 }
3572
3573 /* Note that all addresses are always "out of the step range" when
3574 there's no range to begin with. */
3575 in_step_range = lwp_in_step_range (event_child);
3576
3577 /* If GDB wanted this thread to single step, and the thread is out
3578 of the step range, we always want to report the SIGTRAP, and let
3579 GDB handle it. Watchpoints should always be reported. So should
3580 signals we can't explain. A SIGTRAP we can't explain could be a
3581 GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3582 we do, we'll be able to handle GDB breakpoints on top of internal
3583 breakpoints, by handling the internal breakpoint and still
3584 reporting the event to GDB.  If we don't, we're out of luck; GDB
3585 won't see the breakpoint hit.  If we see a single-step event but
3586 the thread should be continuing, don't pass the trap to gdb.
3587 That indicates that we had previously finished a single-step but
3588 left the single-step pending -- see
3589 complete_ongoing_step_over. */
3590 report_to_gdb = (!maybe_internal_trap
3591 || (current_thread->last_resume_kind == resume_step
3592 && !in_step_range)
3593 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3594 || (!in_step_range
3595 && !bp_explains_trap
3596 && !trace_event
3597 && !step_over_finished
3598 && !(current_thread->last_resume_kind == resume_continue
3599 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3600 || (gdb_breakpoint_here (event_child->stop_pc)
3601 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3602 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3603 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3604
3605 run_breakpoint_commands (event_child->stop_pc);
3606
3607 /* We found no reason GDB would want us to stop. We either hit one
3608 of our own breakpoints, or finished an internal step GDB
3609 shouldn't know about. */
3610 if (!report_to_gdb)
3611 {
3612 if (debug_threads)
3613 {
3614 if (bp_explains_trap)
3615 debug_printf ("Hit a gdbserver breakpoint.\n");
3616 if (step_over_finished)
3617 debug_printf ("Step-over finished.\n");
3618 if (trace_event)
3619 debug_printf ("Tracepoint event.\n");
3620 if (lwp_in_step_range (event_child))
3621 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3622 paddress (event_child->stop_pc),
3623 paddress (event_child->step_range_start),
3624 paddress (event_child->step_range_end));
3625 }
3626
3627 /* We're not reporting this breakpoint to GDB, so apply the
3628 decr_pc_after_break adjustment to the inferior's regcache
3629 ourselves. */
3630
3631 if (the_low_target.set_pc != NULL)
3632 {
3633 struct regcache *regcache
3634 = get_thread_regcache (current_thread, 1);
3635 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3636 }
3637
3638 if (step_over_finished)
3639 {
3640 /* If we have finished stepping over a breakpoint, we've
3641 stopped and suspended all LWPs momentarily except the
3642 stepping one. This is where we resume them all again.
3643 We're going to keep waiting, so use proceed, which
3644 handles stepping over the next breakpoint. */
3645 unsuspend_all_lwps (event_child);
3646 }
3647 else
3648 {
3649 /* Remove the single-step breakpoints, if any.  Note that there
3650 isn't a single-step breakpoint left if we just finished stepping
3651 over one. */
3652 if (can_software_single_step ()
3653 && has_single_step_breakpoints (current_thread))
3654 {
3655 stop_all_lwps (0, event_child);
3656 delete_single_step_breakpoints (current_thread);
3657 unstop_all_lwps (0, event_child);
3658 }
3659 }
3660
3661 if (debug_threads)
3662 debug_printf ("proceeding all threads.\n");
3663 proceed_all_lwps ();
3664
3665 if (debug_threads)
3666 debug_exit ();
3667
3668 return ignore_event (ourstatus);
3669 }
3670
3671 if (debug_threads)
3672 {
3673 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3674 {
3675 std::string str
3676 = target_waitstatus_to_string (&event_child->waitstatus);
3677
3678 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3679 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3680 }
3681 if (current_thread->last_resume_kind == resume_step)
3682 {
3683 if (event_child->step_range_start == event_child->step_range_end)
3684 debug_printf ("GDB wanted to single-step, reporting event.\n");
3685 else if (!lwp_in_step_range (event_child))
3686 debug_printf ("Out of step range, reporting event.\n");
3687 }
3688 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3689 debug_printf ("Stopped by watchpoint.\n");
3690 else if (gdb_breakpoint_here (event_child->stop_pc))
3691 debug_printf ("Stopped by GDB breakpoint.\n");
3692 debug_printf ("Hit a non-gdbserver trap event.\n");
3694 }
3695
3696 /* Alright, we're going to report a stop. */
3697
3698 /* Remove single-step breakpoints. */
3699 if (can_software_single_step ())
3700 {
3701 /* Decide whether to remove the single-step breakpoints. If we
3702 do remove them, stop all lwps, so that other threads won't hit
3703 a breakpoint left in stale memory. */
3704 int remove_single_step_breakpoints_p = 0;
3705
3706 if (non_stop)
3707 {
3708 remove_single_step_breakpoints_p
3709 = has_single_step_breakpoints (current_thread);
3710 }
3711 else
3712 {
3713 /* In all-stop, a stop reply cancels all previous resume
3714 requests. Delete all single-step breakpoints. */
3715
3716 find_thread ([&] (thread_info *thread) {
3717 if (has_single_step_breakpoints (thread))
3718 {
3719 remove_single_step_breakpoints_p = 1;
3720 return true;
3721 }
3722
3723 return false;
3724 });
3725 }
3726
3727 if (remove_single_step_breakpoints_p)
3728 {
3729 /* If we remove single-step breakpoints from memory, stop all lwps,
3730 so that other threads won't hit a breakpoint left in stale
3731 memory. */
3732 stop_all_lwps (0, event_child);
3733
3734 if (non_stop)
3735 {
3736 gdb_assert (has_single_step_breakpoints (current_thread));
3737 delete_single_step_breakpoints (current_thread);
3738 }
3739 else
3740 {
3741 for_each_thread ([] (thread_info *thread){
3742 if (has_single_step_breakpoints (thread))
3743 delete_single_step_breakpoints (thread);
3744 });
3745 }
3746
3747 unstop_all_lwps (0, event_child);
3748 }
3749 }
3750
3751 if (!stabilizing_threads)
3752 {
3753 /* In all-stop, stop all threads. */
3754 if (!non_stop)
3755 stop_all_lwps (0, NULL);
3756
3757 if (step_over_finished)
3758 {
3759 if (!non_stop)
3760 {
3761 /* If we were doing a step-over, all other threads but
3762 the stepping one had been paused in start_step_over,
3763 with their suspend counts incremented. We don't want
3764 to do a full unstop/unpause, because we're in
3765 all-stop mode (so we want threads stopped), but we
3766 still need to unsuspend the other threads, to
3767 decrement their `suspended' count back. */
3768 unsuspend_all_lwps (event_child);
3769 }
3770 else
3771 {
3772 /* If we just finished a step-over, then all threads had
3773 been momentarily paused. In all-stop, that's fine,
3774 we want threads stopped by now anyway. In non-stop,
3775 we need to re-resume threads that GDB wanted to be
3776 running. */
3777 unstop_all_lwps (1, event_child);
3778 }
3779 }
3780
3781 /* If we're not waiting for a specific LWP, choose an event LWP
3782 from among those that have had events. Giving equal priority
3783 to all LWPs that have had events helps prevent
3784 starvation. */
3785 if (ptid_equal (ptid, minus_one_ptid))
3786 {
3787 event_child->status_pending_p = 1;
3788 event_child->status_pending = w;
3789
3790 select_event_lwp (&event_child);
3791
3792 /* current_thread and event_child must stay in sync. */
3793 current_thread = get_lwp_thread (event_child);
3794
3795 event_child->status_pending_p = 0;
3796 w = event_child->status_pending;
3797 }
3798
3800 /* Stabilize threads (move out of jump pads). */
3801 if (!non_stop)
3802 stabilize_threads ();
3803 }
3804 else
3805 {
3806 /* If we just finished a step-over, then all threads had been
3807 momentarily paused. In all-stop, that's fine, we want
3808 threads stopped by now anyway. In non-stop, we need to
3809 re-resume threads that GDB wanted to be running. */
3810 if (step_over_finished)
3811 unstop_all_lwps (1, event_child);
3812 }
3813
3814 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3815 {
3816 /* If the reported event is an exit, fork, vfork or exec, let
3817 GDB know. */
3818
3819 /* Break the unreported fork relationship chain. */
3820 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3821 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3822 {
3823 event_child->fork_relative->fork_relative = NULL;
3824 event_child->fork_relative = NULL;
3825 }
3826
3827 *ourstatus = event_child->waitstatus;
3828 /* Clear the event lwp's waitstatus since we handled it already. */
3829 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3830 }
3831 else
3832 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3833
3834 /* Now that we've selected our final event LWP, un-adjust its PC if
3835 it was a software breakpoint, and the client doesn't know we can
3836 adjust the breakpoint ourselves. */
3837 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3838 && !swbreak_feature)
3839 {
3840 int decr_pc = the_low_target.decr_pc_after_break;
3841
3842 if (decr_pc != 0)
3843 {
3844 struct regcache *regcache
3845 = get_thread_regcache (current_thread, 1);
3846 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3847 }
3848 }
3849
3850 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3851 {
3852 get_syscall_trapinfo (event_child,
3853 &ourstatus->value.syscall_number);
3854 ourstatus->kind = event_child->syscall_state;
3855 }
3856 else if (current_thread->last_resume_kind == resume_stop
3857 && WSTOPSIG (w) == SIGSTOP)
3858 {
3859 /* A thread that GDB requested to stop with vCont;t stopped
3860 cleanly, so report it as SIG0. The use of SIGSTOP is an
3861 implementation detail. */
3862 ourstatus->value.sig = GDB_SIGNAL_0;
3863 }
3864 else if (current_thread->last_resume_kind == resume_stop
3865 && WSTOPSIG (w) != SIGSTOP)
3866 {
3867 /* A thread that GDB requested to stop with vCont;t, but which
3868 stopped for some other reason. */
3869 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3870 }
3871 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3872 {
3873 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3874 }
3875
3876 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3877
3878 if (debug_threads)
3879 {
3880 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3881 target_pid_to_str (ptid_of (current_thread)),
3882 ourstatus->kind, ourstatus->value.sig);
3883 debug_exit ();
3884 }
3885
3886 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3887 return filter_exit_event (event_child, ourstatus);
3888
3889 return ptid_of (current_thread);
3890 }
3891
3892 /* Get rid of any pending event in the pipe. */
3893 static void
3894 async_file_flush (void)
3895 {
3896 int ret;
3897 char buf;
3898
3899 do
3900 ret = read (linux_event_pipe[0], &buf, 1);
3901 while (ret >= 0 || (ret == -1 && errno == EINTR));
3902 }
3903
3904 /* Put something in the pipe, so the event loop wakes up. */
3905 static void
3906 async_file_mark (void)
3907 {
3908 int ret;
3909
3910 async_file_flush ();
3911
3912 do
3913 ret = write (linux_event_pipe[1], "+", 1);
3914 while (ret == 0 || (ret == -1 && errno == EINTR));
3915
3916 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3917 be awakened anyway. */
3918 }
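
/* The flush/mark pair above is the classic self-pipe trick for
   waking up a select/poll based event loop: writing a byte makes the
   pipe's read end readable, and draining it clears the wakeup. A
   minimal sketch of the idea, with hypothetical descriptors rather
   than gdbserver's actual pipe setup -- both ends are O_NONBLOCK, the
   write marks, the read loop flushes:

     int fds[2];
     char c;

     pipe (fds);
     fcntl (fds[0], F_SETFL, O_NONBLOCK);
     fcntl (fds[1], F_SETFL, O_NONBLOCK);
     write (fds[1], "+", 1);
     while (read (fds[0], &c, 1) > 0)
       ;
*/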
3919
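/* The target's wait entry point: loop over linux_wait_1 until it
   produces an event, or, if TARGET_WNOHANG is set, return after a
   single attempt with whatever it has (possibly nothing yet). */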
3920 static ptid_t
3921 linux_wait (ptid_t ptid,
3922 struct target_waitstatus *ourstatus, int target_options)
3923 {
3924 ptid_t event_ptid;
3925
3926 /* Flush the async file first. */
3927 if (target_is_async_p ())
3928 async_file_flush ();
3929
3930 do
3931 {
3932 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3933 }
3934 while ((target_options & TARGET_WNOHANG) == 0
3935 && ptid_equal (event_ptid, null_ptid)
3936 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3937
3938 /* If at least one stop was reported, there may be more. A single
3939 SIGCHLD can signal more than one child stop. */
3940 if (target_is_async_p ()
3941 && (target_options & TARGET_WNOHANG) != 0
3942 && !ptid_equal (event_ptid, null_ptid))
3943 async_file_mark ();
3944
3945 return event_ptid;
3946 }
3947
3948 /* Send a signal to an LWP. */
3949
3950 static int
3951 kill_lwp (unsigned long lwpid, int signo)
3952 {
3953 int ret;
3954
3955 errno = 0;
3956 ret = syscall (__NR_tkill, lwpid, signo);
3957 if (errno == ENOSYS)
3958 {
3959 /* If tkill fails, then we are not using nptl threads, a
3960 configuration we no longer support. */
3961 perror_with_name (("tkill"));
3962 }
3963 return ret;
3964 }
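
/* Note that kill(2) delivers a signal to the process as a whole (any
   thread may receive it), while tkill directs it at one specific
   thread, which is what LWP control requires here. On kernels that
   have it, tgkill is the safer variant since it also checks the
   thread group and so guards against lwpid reuse; a sketch, assuming
   hypothetical TGID/LWPID values:

     syscall (__NR_tgkill, tgid, lwpid, SIGSTOP);
*/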
3965
3966 void
3967 linux_stop_lwp (struct lwp_info *lwp)
3968 {
3969 send_sigstop (lwp);
3970 }
3971
3972 static void
3973 send_sigstop (struct lwp_info *lwp)
3974 {
3975 int pid;
3976
3977 pid = lwpid_of (get_lwp_thread (lwp));
3978
3979 /* If we already have a pending stop signal for this process, don't
3980 send another. */
3981 if (lwp->stop_expected)
3982 {
3983 if (debug_threads)
3984 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3985
3986 return;
3987 }
3988
3989 if (debug_threads)
3990 debug_printf ("Sending sigstop to lwp %d\n", pid);
3991
3992 lwp->stop_expected = 1;
3993 kill_lwp (pid, SIGSTOP);
3994 }
3995
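/* find_inferior callback: send a SIGSTOP to the LWP of THREAD, unless
   it is EXCEPT or is already stopped. */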
3996 static int
3997 send_sigstop_callback (thread_info *thread, void *except)
3998 {
3999 struct lwp_info *lwp = get_thread_lwp (thread);
4000
4001 /* Ignore EXCEPT. */
4002 if (lwp == except)
4003 return 0;
4004
4005 if (lwp->stopped)
4006 return 0;
4007
4008 send_sigstop (lwp);
4009 return 0;
4010 }
4011
4012 /* Increment the suspend count of an LWP, and stop it, if not stopped
4013 yet. */
4014 static int
4015 suspend_and_send_sigstop_callback (thread_info *thread, void *except)
4016 {
4017 struct lwp_info *lwp = get_thread_lwp (thread);
4018
4019 /* Ignore EXCEPT. */
4020 if (lwp == except)
4021 return 0;
4022
4023 lwp_suspended_inc (lwp);
4024
4025 return send_sigstop_callback (thread, except);
4026 }
4027
4028 static void
4029 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4030 {
4031 /* Store the exit status for later. */
4032 lwp->status_pending_p = 1;
4033 lwp->status_pending = wstat;
4034
4035 /* Store in waitstatus as well, as there's nothing else to process
4036 for this event. */
4037 if (WIFEXITED (wstat))
4038 {
4039 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4040 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4041 }
4042 else if (WIFSIGNALED (wstat))
4043 {
4044 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4045 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4046 }
4047
4048 /* Prevent trying to stop it. */
4049 lwp->stopped = 1;
4050
4051 /* No further stops are expected from a dead lwp. */
4052 lwp->stop_expected = 0;
4053 }
4054
4055 /* Return true if LWP has exited already, and has a pending exit event
4056 to report to GDB. */
4057
4058 static int
4059 lwp_is_marked_dead (struct lwp_info *lwp)
4060 {
4061 return (lwp->status_pending_p
4062 && (WIFEXITED (lwp->status_pending)
4063 || WIFSIGNALED (lwp->status_pending)));
4064 }
4065
4066 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4067
4068 static void
4069 wait_for_sigstop (void)
4070 {
4071 struct thread_info *saved_thread;
4072 ptid_t saved_tid;
4073 int wstat;
4074 int ret;
4075
4076 saved_thread = current_thread;
4077 if (saved_thread != NULL)
4078 saved_tid = saved_thread->id;
4079 else
4080 saved_tid = null_ptid; /* avoid bogus unused warning */
4081
4082 if (debug_threads)
4083 debug_printf ("wait_for_sigstop: pulling events\n");
4084
4085 /* Passing NULL_PTID as filter indicates we want all events to be
4086 left pending. Eventually this returns when there are no
4087 unwaited-for children left. */
4088 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4089 &wstat, __WALL);
4090 gdb_assert (ret == -1);
4091
4092 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4093 current_thread = saved_thread;
4094 else
4095 {
4096 if (debug_threads)
4097 debug_printf ("Previously current thread died.\n");
4098
4099 /* We can't change the current inferior behind GDB's back,
4100 otherwise, a subsequent command may apply to the wrong
4101 process. */
4102 current_thread = NULL;
4103 }
4104 }
4105
4106 /* Returns true if THREAD is stopped in a jump pad, and we can't
4107 move it out, because we need to report the stop event to GDB. For
4108 example, if the user puts a breakpoint in the jump pad, it's
4109 because she wants to debug it. */
4110
4111 static bool
4112 stuck_in_jump_pad_callback (thread_info *thread)
4113 {
4114 struct lwp_info *lwp = get_thread_lwp (thread);
4115
4116 if (lwp->suspended != 0)
4117 {
4118 internal_error (__FILE__, __LINE__,
4119 "LWP %ld is suspended, suspended=%d\n",
4120 lwpid_of (thread), lwp->suspended);
4121 }
4122 gdb_assert (lwp->stopped);
4123
4124 /* Allow debugging the jump pad, gdb_collect, etc. */
4125 return (supports_fast_tracepoints ()
4126 && agent_loaded_p ()
4127 && (gdb_breakpoint_here (lwp->stop_pc)
4128 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4129 || thread->last_resume_kind == resume_step)
4130 && (linux_fast_tracepoint_collecting (lwp, NULL)
4131 != fast_tpoint_collect_result::not_collecting));
4132 }
4133
4134 static void
4135 move_out_of_jump_pad_callback (thread_info *thread)
4136 {
4137 struct thread_info *saved_thread;
4138 struct lwp_info *lwp = get_thread_lwp (thread);
4139 int *wstat;
4140
4141 if (lwp->suspended != 0)
4142 {
4143 internal_error (__FILE__, __LINE__,
4144 "LWP %ld is suspended, suspended=%d\n",
4145 lwpid_of (thread), lwp->suspended);
4146 }
4147 gdb_assert (lwp->stopped);
4148
4149 /* For gdb_breakpoint_here. */
4150 saved_thread = current_thread;
4151 current_thread = thread;
4152
4153 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4154
4155 /* Allow debugging the jump pad, gdb_collect, etc. */
4156 if (!gdb_breakpoint_here (lwp->stop_pc)
4157 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4158 && thread->last_resume_kind != resume_step
4159 && maybe_move_out_of_jump_pad (lwp, wstat))
4160 {
4161 if (debug_threads)
4162 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4163 lwpid_of (thread));
4164
4165 if (wstat)
4166 {
4167 lwp->status_pending_p = 0;
4168 enqueue_one_deferred_signal (lwp, wstat);
4169
4170 if (debug_threads)
4171 debug_printf ("Signal %d for LWP %ld deferred "
4172 "(in jump pad)\n",
4173 WSTOPSIG (*wstat), lwpid_of (thread));
4174 }
4175
4176 linux_resume_one_lwp (lwp, 0, 0, NULL);
4177 }
4178 else
4179 lwp_suspended_inc (lwp);
4180
4181 current_thread = saved_thread;
4182 }
4183
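/* find_inferior predicate: return 1 if THREAD's LWP is alive and
   running, 0 if it is dead or stopped. DATA is unused. */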
4184 static int
4185 lwp_running (thread_info *thread, void *data)
4186 {
4187 struct lwp_info *lwp = get_thread_lwp (thread);
4188
4189 if (lwp_is_marked_dead (lwp))
4190 return 0;
4191 if (lwp->stopped)
4192 return 0;
4193 return 1;
4194 }
4195
4196 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4197 If SUSPEND, then also increase the suspend count of every LWP,
4198 except EXCEPT. */
4199
4200 static void
4201 stop_all_lwps (int suspend, struct lwp_info *except)
4202 {
4203 /* Should not be called recursively. */
4204 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4205
4206 if (debug_threads)
4207 {
4208 debug_enter ();
4209 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4210 suspend ? "stop-and-suspend" : "stop",
4211 except != NULL
4212 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4213 : "none");
4214 }
4215
4216 stopping_threads = (suspend
4217 ? STOPPING_AND_SUSPENDING_THREADS
4218 : STOPPING_THREADS);
4219
4220 if (suspend)
4221 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4222 else
4223 find_inferior (&all_threads, send_sigstop_callback, except);
4224 wait_for_sigstop ();
4225 stopping_threads = NOT_STOPPING_THREADS;
4226
4227 if (debug_threads)
4228 {
4229 debug_printf ("stop_all_lwps done, setting stopping_threads "
4230 "back to !stopping\n");
4231 debug_exit ();
4232 }
4233 }
4234
4235 /* Enqueue one signal in the chain of signals which need to be
4236 delivered to this process on next resume. */
4237
4238 static void
4239 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4240 {
4241 struct pending_signals *p_sig = XNEW (struct pending_signals);
4242
4243 p_sig->prev = lwp->pending_signals;
4244 p_sig->signal = signal;
4245 if (info == NULL)
4246 memset (&p_sig->info, 0, sizeof (siginfo_t));
4247 else
4248 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4249 lwp->pending_signals = p_sig;
4250 }
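
/* The chain above is linked newest-first through the `prev' field,
   but consumers walk to the end of the chain before dequeuing (see
   the loop in linux_resume_one_lwp_throw below), so signals are in
   fact delivered oldest-first:

     p_sig = &lwp->pending_signals;
     while ((*p_sig)->prev != NULL)
       p_sig = &(*p_sig)->prev;
*/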
4251
4252 /* Install breakpoints for software single stepping. */
4253
4254 static void
4255 install_software_single_step_breakpoints (struct lwp_info *lwp)
4256 {
4257 struct thread_info *thread = get_lwp_thread (lwp);
4258 struct regcache *regcache = get_thread_regcache (thread, 1);
4259 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4260
4261 current_thread = thread;
4262 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4263
4264 for (CORE_ADDR pc : next_pcs)
4265 set_single_step_breakpoint (pc, current_ptid);
4266
4267 do_cleanups (old_chain);
4268 }
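
/* make_cleanup_restore_current_thread arranges for current_thread to
   be restored when do_cleanups runs above; this is gdb's pre-RAII
   idiom for scoped state, and in modern C++ the rough equivalent
   would be a scoped restore of the current_thread variable. */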
4269
4270 /* Single step via hardware or software single step.
4271 Return 1 if hardware single stepping, 0 if software single
4272 stepping or if single stepping is not supported. */
4273
4274 static int
4275 single_step (struct lwp_info* lwp)
4276 {
4277 int step = 0;
4278
4279 if (can_hardware_single_step ())
4280 {
4281 step = 1;
4282 }
4283 else if (can_software_single_step ())
4284 {
4285 install_software_single_step_breakpoints (lwp);
4286 step = 0;
4287 }
4288 else
4289 {
4290 if (debug_threads)
4291 debug_printf ("stepping is not implemented on this target");
4292 }
4293
4294 return step;
4295 }
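
/* The STEP value returned here is what later chooses between
   PTRACE_SINGLESTEP and PTRACE_CONT in linux_resume_one_lwp_throw:
   hardware single-step lets the CPU trap after one instruction,
   while software single-step plants breakpoints at the possible next
   PCs (installed above) and then continues normally. */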
4296
4297 /* The signal can be delivered to the inferior if we are not trying to
4298 finish a fast tracepoint collect. Since a signal can be delivered
4299 during the step-over, the program may enter the signal handler and
4300 trap again after returning from it. We can live with such spurious
4301 double traps. */
4302
4303 static int
4304 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4305 {
4306 return (lwp->collecting_fast_tracepoint
4307 == fast_tpoint_collect_result::not_collecting);
4308 }
4309
4310 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4311 SIGNAL is nonzero, give it that signal. */
4312
4313 static void
4314 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4315 int step, int signal, siginfo_t *info)
4316 {
4317 struct thread_info *thread = get_lwp_thread (lwp);
4318 struct thread_info *saved_thread;
4319 int ptrace_request;
4320 struct process_info *proc = get_thread_process (thread);
4321
4322 /* Note that target description may not be initialised
4323 (proc->tdesc == NULL) at this point because the program hasn't
4324 stopped at the first instruction yet. It means GDBserver skips
4325 the extra traps from the wrapper program (see option --wrapper).
4326 Code in this function that requires register access should be
4327 guarded by a check on proc->tdesc or the like. */
4328
4329 if (lwp->stopped == 0)
4330 return;
4331
4332 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4333
4334 fast_tpoint_collect_result fast_tp_collecting
4335 = lwp->collecting_fast_tracepoint;
4336
4337 gdb_assert (!stabilizing_threads
4338 || (fast_tp_collecting
4339 != fast_tpoint_collect_result::not_collecting));
4340
4341 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4342 user used the "jump" command, or "set $pc = foo"). */
4343 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4344 {
4345 /* Collecting 'while-stepping' actions doesn't make sense
4346 anymore. */
4347 release_while_stepping_state_list (thread);
4348 }
4349
4350 /* If we have pending signals or status, and a new signal, enqueue the
4351 signal. Also enqueue the signal if it can't be delivered to the
4352 inferior right now. */
4353 if (signal != 0
4354 && (lwp->status_pending_p
4355 || lwp->pending_signals != NULL
4356 || !lwp_signal_can_be_delivered (lwp)))
4357 {
4358 enqueue_pending_signal (lwp, signal, info);
4359
4360 /* Postpone any pending signal. It was enqueued above. */
4361 signal = 0;
4362 }
4363
4364 if (lwp->status_pending_p)
4365 {
4366 if (debug_threads)
4367 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4368 " has pending status\n",
4369 lwpid_of (thread), step ? "step" : "continue",
4370 lwp->stop_expected ? "expected" : "not expected");
4371 return;
4372 }
4373
4374 saved_thread = current_thread;
4375 current_thread = thread;
4376
4377 /* This bit needs some thinking about. If we get a signal that
4378 we must report while a single-step reinsert is still pending,
4379 we often end up resuming the thread. It might be better to
4380 (ew) allow a stack of pending events; then we could be sure that
4381 the reinsert happened right away and not lose any signals.
4382
4383 Making this stack would also shrink the window in which breakpoints are
4384 uninserted (see comment in linux_wait_for_lwp) but not enough for
4385 complete correctness, so it won't solve that problem. It may be
4386 worthwhile just to solve this one, however. */
4387 if (lwp->bp_reinsert != 0)
4388 {
4389 if (debug_threads)
4390 debug_printf (" pending reinsert at 0x%s\n",
4391 paddress (lwp->bp_reinsert));
4392
4393 if (can_hardware_single_step ())
4394 {
4395 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4396 {
4397 if (step == 0)
4398 warning ("BAD - reinserting but not stepping.");
4399 if (lwp->suspended)
4400 warning ("BAD - reinserting and suspended(%d).",
4401 lwp->suspended);
4402 }
4403 }
4404
4405 step = maybe_hw_step (thread);
4406 }
4407
4408 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4409 {
4410 if (debug_threads)
4411 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4412 " (exit-jump-pad-bkpt)\n",
4413 lwpid_of (thread));
4414 }
4415 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4416 {
4417 if (debug_threads)
4418 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4419 " single-stepping\n",
4420 lwpid_of (thread));
4421
4422 if (can_hardware_single_step ())
4423 step = 1;
4424 else
4425 {
4426 internal_error (__FILE__, __LINE__,
4427 "moving out of jump pad single-stepping"
4428 " not implemented on this target");
4429 }
4430 }
4431
4432 /* If we have while-stepping actions in this thread, set it stepping.
4433 If we have a signal to deliver, it may or may not be set to
4434 SIG_IGN, we don't know. Assume so, and allow collecting
4435 while-stepping into a signal handler. A possible smart thing to
4436 do would be to set an internal breakpoint at the signal return
4437 address, continue, and carry on catching this while-stepping
4438 action only when that breakpoint is hit. A future
4439 enhancement. */
4440 if (thread->while_stepping != NULL)
4441 {
4442 if (debug_threads)
4443 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4444 lwpid_of (thread));
4445
4446 step = single_step (lwp);
4447 }
4448
4449 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4450 {
4451 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4452
4453 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4454
4455 if (debug_threads)
4456 {
4457 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4458 (long) lwp->stop_pc);
4459 }
4460 }
4461
4462 /* If we have pending signals, consume one if it can be delivered to
4463 the inferior. */
4464 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4465 {
4466 struct pending_signals **p_sig;
4467
4468 p_sig = &lwp->pending_signals;
4469 while ((*p_sig)->prev != NULL)
4470 p_sig = &(*p_sig)->prev;
4471
4472 signal = (*p_sig)->signal;
4473 if ((*p_sig)->info.si_signo != 0)
4474 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4475 &(*p_sig)->info);
4476
4477 free (*p_sig);
4478 *p_sig = NULL;
4479 }
4480
4481 if (debug_threads)
4482 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4483 lwpid_of (thread), step ? "step" : "continue", signal,
4484 lwp->stop_expected ? "expected" : "not expected");
4485
4486 if (the_low_target.prepare_to_resume != NULL)
4487 the_low_target.prepare_to_resume (lwp);
4488
4489 regcache_invalidate_thread (thread);
4490 errno = 0;
4491 lwp->stepping = step;
4492 if (step)
4493 ptrace_request = PTRACE_SINGLESTEP;
4494 else if (gdb_catching_syscalls_p (lwp))
4495 ptrace_request = PTRACE_SYSCALL;
4496 else
4497 ptrace_request = PTRACE_CONT;
4498 ptrace (ptrace_request,
4499 lwpid_of (thread),
4500 (PTRACE_TYPE_ARG3) 0,
4501 /* Coerce to a uintptr_t first to avoid potential gcc warning
4502 of coercing an 8 byte integer to a 4 byte pointer. */
4503 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4504
4505 current_thread = saved_thread;
4506 if (errno)
4507 perror_with_name ("resuming thread");
4508
4509 /* Successfully resumed. Clear state that no longer makes sense,
4510 and mark the LWP as running. Must not do this before resuming
4511 otherwise if that fails other code will be confused. E.g., we'd
4512 later try to stop the LWP and hang forever waiting for a stop
4513 status. Note that we must not throw after this is cleared,
4514 otherwise handle_zombie_lwp_error would get confused. */
4515 lwp->stopped = 0;
4516 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4517 }
4518
4519 /* Called when we try to resume a stopped LWP and that errors out. If
4520 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4521 or about to become), discard the error, clear any pending status
4522 the LWP may have, and return true (we'll collect the exit status
4523 soon enough). Otherwise, return false. */
4524
4525 static int
4526 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4527 {
4528 struct thread_info *thread = get_lwp_thread (lp);
4529
4530 /* If we get an error after resuming the LWP successfully, we'd
4531 confuse !T state for the LWP being gone. */
4532 gdb_assert (lp->stopped);
4533
4534 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4535 because even if ptrace failed with ESRCH, the tracee may be "not
4536 yet fully dead", but already refusing ptrace requests. In that
4537 case the tracee has 'R (Running)' state for a little bit
4538 (observed in Linux 3.18). See also the note on ESRCH in the
4539 ptrace(2) man page. Instead, check whether the LWP has any state
4540 other than ptrace-stopped. */
4541
4542 /* Don't assume anything if /proc/PID/status can't be read. */
4543 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4544 {
4545 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4546 lp->status_pending_p = 0;
4547 return 1;
4548 }
4549 return 0;
4550 }
4551
4552 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4553 disappears while we try to resume it. */
4554
4555 static void
4556 linux_resume_one_lwp (struct lwp_info *lwp,
4557 int step, int signal, siginfo_t *info)
4558 {
4559 TRY
4560 {
4561 linux_resume_one_lwp_throw (lwp, step, signal, info);
4562 }
4563 CATCH (ex, RETURN_MASK_ERROR)
4564 {
4565 if (!check_ptrace_stopped_lwp_gone (lwp))
4566 throw_exception (ex);
4567 }
4568 END_CATCH
4569 }
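
/* TRY/CATCH/END_CATCH are gdb's exception-handling macros; the logic
   above is roughly equivalent to this C++ sketch:

     try
       {
         linux_resume_one_lwp_throw (lwp, step, signal, info);
       }
     catch (gdb_exception &ex)
       {
         if (!check_ptrace_stopped_lwp_gone (lwp))
           throw;
       }
*/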
4570
4571 /* This function is called once per thread via for_each_thread.
4572 We look up which resume request applies to THREAD and mark it with a
4573 pointer to the appropriate resume request.
4574
4575 This algorithm is O(threads * resume elements), but resume elements
4576 is small (and will remain small at least until GDB supports thread
4577 suspension). */
4578
4579 static void
4580 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4581 {
4582 struct lwp_info *lwp = get_thread_lwp (thread);
4583
4584 for (int ndx = 0; ndx < n; ndx++)
4585 {
4586 ptid_t ptid = resume[ndx].thread;
4587 if (ptid_equal (ptid, minus_one_ptid)
4588 || ptid == thread->id
4589 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4590 of PID'. */
4591 || (ptid_get_pid (ptid) == pid_of (thread)
4592 && (ptid_is_pid (ptid)
4593 || ptid_get_lwp (ptid) == -1)))
4594 {
4595 if (resume[ndx].kind == resume_stop
4596 && thread->last_resume_kind == resume_stop)
4597 {
4598 if (debug_threads)
4599 debug_printf ("already %s LWP %ld at GDB's request\n",
4600 (thread->last_status.kind
4601 == TARGET_WAITKIND_STOPPED)
4602 ? "stopped"
4603 : "stopping",
4604 lwpid_of (thread));
4605
4606 continue;
4607 }
4608
4609 /* Ignore (wildcard) resume requests for already-resumed
4610 threads. */
4611 if (resume[ndx].kind != resume_stop
4612 && thread->last_resume_kind != resume_stop)
4613 {
4614 if (debug_threads)
4615 debug_printf ("already %s LWP %ld at GDB's request\n",
4616 (thread->last_resume_kind
4617 == resume_step)
4618 ? "stepping"
4619 : "continuing",
4620 lwpid_of (thread));
4621 continue;
4622 }
4623
4624 /* Don't let wildcard resumes resume fork children that GDB
4625 does not yet know are new fork children. */
4626 if (lwp->fork_relative != NULL)
4627 {
4628 struct lwp_info *rel = lwp->fork_relative;
4629
4630 if (rel->status_pending_p
4631 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4632 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4633 {
4634 if (debug_threads)
4635 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4636 lwpid_of (thread));
4637 continue;
4638 }
4639 }
4640
4641 /* If the thread has a pending event that has already been
4642 reported to GDBserver core, but GDB has not pulled the
4643 event out of the vStopped queue yet, likewise, ignore the
4644 (wildcard) resume request. */
4645 if (in_queued_stop_replies (thread->id))
4646 {
4647 if (debug_threads)
4648 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4649 lwpid_of (thread));
4650 continue;
4651 }
4652
4653 lwp->resume = &resume[ndx];
4654 thread->last_resume_kind = lwp->resume->kind;
4655
4656 lwp->step_range_start = lwp->resume->step_range_start;
4657 lwp->step_range_end = lwp->resume->step_range_end;
4658
4659 /* If we had a deferred signal to report, dequeue one now.
4660 This can happen if LWP gets more than one signal while
4661 trying to get out of a jump pad. */
4662 if (lwp->stopped
4663 && !lwp->status_pending_p
4664 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4665 {
4666 lwp->status_pending_p = 1;
4667
4668 if (debug_threads)
4669 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4670 "leaving status pending.\n",
4671 WSTOPSIG (lwp->status_pending),
4672 lwpid_of (thread));
4673 }
4674
4675 return;
4676 }
4677 }
4678
4679 /* No resume action for this thread. */
4680 lwp->resume = NULL;
4681 }
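
/* For illustration, a `vCont;c' with no thread-id arrives here as
   minus_one_ptid and matches every thread; `vCont;c:p<pid>.-1'
   arrives with a pid and an lwp of -1 and matches every thread of
   that process, which is what the pid_of/ptid_get_lwp test above
   implements. */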
4682
4683 /* find_thread callback for linux_resume. Return true if this lwp
4684 has an interesting status pending. */
4685
4686 static bool
4687 resume_status_pending_p (thread_info *thread)
4688 {
4689 struct lwp_info *lwp = get_thread_lwp (thread);
4690
4691 /* LWPs which will not be resumed are not interesting, because
4692 we might not wait for them next time through linux_wait. */
4693 if (lwp->resume == NULL)
4694 return false;
4695
4696 return thread_still_has_status_pending_p (thread);
4697 }
4698
4699 /* Return true if this lwp that GDB wants running is stopped at an
4700 internal breakpoint that we need to step over. It assumes that any
4701 required STOP_PC adjustment has already been propagated to the
4702 inferior's regcache. */
4703
4704 static bool
4705 need_step_over_p (thread_info *thread)
4706 {
4707 struct lwp_info *lwp = get_thread_lwp (thread);
4708 struct thread_info *saved_thread;
4709 CORE_ADDR pc;
4710 struct process_info *proc = get_thread_process (thread);
4711
4712 /* GDBserver is skipping the extra traps from the wrapper program,
4713 so we don't have to do a step-over. */
4714 if (proc->tdesc == NULL)
4715 return false;
4716
4717 /* LWPs which will not be resumed are not interesting, because we
4718 might not wait for them next time through linux_wait. */
4719
4720 if (!lwp->stopped)
4721 {
4722 if (debug_threads)
4723 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4724 lwpid_of (thread));
4725 return false;
4726 }
4727
4728 if (thread->last_resume_kind == resume_stop)
4729 {
4730 if (debug_threads)
4731 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4732 " stopped\n",
4733 lwpid_of (thread));
4734 return false;
4735 }
4736
4737 gdb_assert (lwp->suspended >= 0);
4738
4739 if (lwp->suspended)
4740 {
4741 if (debug_threads)
4742 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4743 lwpid_of (thread));
4744 return false;
4745 }
4746
4747 if (lwp->status_pending_p)
4748 {
4749 if (debug_threads)
4750 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4751 " status.\n",
4752 lwpid_of (thread));
4753 return false;
4754 }
4755
4756 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4757 or we have. */
4758 pc = get_pc (lwp);
4759
4760 /* If the PC has changed since we stopped, then don't do anything,
4761 and let the breakpoint/tracepoint be hit. This happens if, for
4762 instance, GDB handled the decr_pc_after_break subtraction itself,
4763 GDB is OOL stepping this thread, or the user has issued a "jump"
4764 command, or poked the thread's registers herself. */
4765 if (pc != lwp->stop_pc)
4766 {
4767 if (debug_threads)
4768 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4769 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4770 lwpid_of (thread),
4771 paddress (lwp->stop_pc), paddress (pc));
4772 return false;
4773 }
4774
4775 /* On software single step target, resume the inferior with signal
4776 rather than stepping over. */
4777 if (can_software_single_step ()
4778 && lwp->pending_signals != NULL
4779 && lwp_signal_can_be_delivered (lwp))
4780 {
4781 if (debug_threads)
4782 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4783 " signals.\n",
4784 lwpid_of (thread));
4785
4786 return false;
4787 }
4788
4789 saved_thread = current_thread;
4790 current_thread = thread;
4791
4792 /* We can only step over breakpoints we know about. */
4793 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4794 {
4795 /* Don't step over a breakpoint that GDB expects to hit, though.
4796 If the condition is being evaluated on the target's side and it
4797 evaluates to false, step over this breakpoint as well. */
4798 if (gdb_breakpoint_here (pc)
4799 && gdb_condition_true_at_breakpoint (pc)
4800 && gdb_no_commands_at_breakpoint (pc))
4801 {
4802 if (debug_threads)
4803 debug_printf ("Need step over [LWP %ld]? yes, but found"
4804 " GDB breakpoint at 0x%s; skipping step over\n",
4805 lwpid_of (thread), paddress (pc));
4806
4807 current_thread = saved_thread;
4808 return false;
4809 }
4810 else
4811 {
4812 if (debug_threads)
4813 debug_printf ("Need step over [LWP %ld]? yes, "
4814 "found breakpoint at 0x%s\n",
4815 lwpid_of (thread), paddress (pc));
4816
4817 /* We've found an lwp that needs stepping over --- return true so
4818 that find_thread stops looking. */
4819 current_thread = saved_thread;
4820
4821 return true;
4822 }
4823 }
4824
4825 current_thread = saved_thread;
4826
4827 if (debug_threads)
4828 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4829 " at 0x%s\n",
4830 lwpid_of (thread), paddress (pc));
4831
4832 return false;
4833 }
4834
4835 /* Start a step-over operation on LWP. When LWP stopped at a
4836 breakpoint, to make progress, we need to move the breakpoint out
4837 of the way. If we let other threads run while we do that, they may
4838 pass by the breakpoint location and miss hitting it. To avoid
4839 that, a step-over momentarily stops all threads while LWP is
4840 single-stepped by either hardware or software while the breakpoint
4841 is temporarily uninserted from the inferior. When the single-step
4842 finishes, we reinsert the breakpoint, and let all threads that are
4843 supposed to be running, run again. */
4844
4845 static int
4846 start_step_over (struct lwp_info *lwp)
4847 {
4848 struct thread_info *thread = get_lwp_thread (lwp);
4849 struct thread_info *saved_thread;
4850 CORE_ADDR pc;
4851 int step;
4852
4853 if (debug_threads)
4854 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4855 lwpid_of (thread));
4856
4857 stop_all_lwps (1, lwp);
4858
4859 if (lwp->suspended != 0)
4860 {
4861 internal_error (__FILE__, __LINE__,
4862 "LWP %ld suspended=%d\n", lwpid_of (thread),
4863 lwp->suspended);
4864 }
4865
4866 if (debug_threads)
4867 debug_printf ("Done stopping all threads for step-over.\n");
4868
4869 /* Note, we should always reach here with an already adjusted PC,
4870 either by GDB (if we're resuming due to GDB's request), or by our
4871 caller, if we just finished handling an internal breakpoint GDB
4872 shouldn't care about. */
4873 pc = get_pc (lwp);
4874
4875 saved_thread = current_thread;
4876 current_thread = thread;
4877
4878 lwp->bp_reinsert = pc;
4879 uninsert_breakpoints_at (pc);
4880 uninsert_fast_tracepoint_jumps_at (pc);
4881
4882 step = single_step (lwp);
4883
4884 current_thread = saved_thread;
4885
4886 linux_resume_one_lwp (lwp, step, 0, NULL);
4887
4888 /* Require next event from this LWP. */
4889 step_over_bkpt = thread->id;
4890 return 1;
4891 }
4892
4893 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4894 start_step_over, if still there, and delete any single-step
4895 breakpoints we've set, on non hardware single-step targets. */
4896
4897 static int
4898 finish_step_over (struct lwp_info *lwp)
4899 {
4900 if (lwp->bp_reinsert != 0)
4901 {
4902 struct thread_info *saved_thread = current_thread;
4903
4904 if (debug_threads)
4905 debug_printf ("Finished step over.\n");
4906
4907 current_thread = get_lwp_thread (lwp);
4908
4909 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4910 may be no breakpoint to reinsert there by now. */
4911 reinsert_breakpoints_at (lwp->bp_reinsert);
4912 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4913
4914 lwp->bp_reinsert = 0;
4915
4916 /* Delete any single-step breakpoints. No longer needed. We
4917 don't have to worry about other threads hitting this trap,
4918 and later not being able to explain it, because we were
4919 stepping over a breakpoint, and we hold all threads but
4920 LWP stopped while doing that. */
4921 if (!can_hardware_single_step ())
4922 {
4923 gdb_assert (has_single_step_breakpoints (current_thread));
4924 delete_single_step_breakpoints (current_thread);
4925 }
4926
4927 step_over_bkpt = null_ptid;
4928 current_thread = saved_thread;
4929 return 1;
4930 }
4931 else
4932 return 0;
4933 }
4934
4935 /* If there's a step over in progress, wait until all threads stop
4936 (that is, until the stepping thread finishes its step), and
4937 unsuspend all lwps. The stepping thread ends with its status
4938 pending, which is processed later when we get back to processing
4939 events. */
4940
4941 static void
4942 complete_ongoing_step_over (void)
4943 {
4944 if (!ptid_equal (step_over_bkpt, null_ptid))
4945 {
4946 struct lwp_info *lwp;
4947 int wstat;
4948 int ret;
4949
4950 if (debug_threads)
4951 debug_printf ("detach: step over in progress, finish it first\n");
4952
4953 /* Passing NULL_PTID as filter indicates we want all events to
4954 be left pending. Eventually this returns when there are no
4955 unwaited-for children left. */
4956 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4957 &wstat, __WALL);
4958 gdb_assert (ret == -1);
4959
4960 lwp = find_lwp_pid (step_over_bkpt);
4961 if (lwp != NULL)
4962 finish_step_over (lwp);
4963 step_over_bkpt = null_ptid;
4964 unsuspend_all_lwps (lwp);
4965 }
4966 }
4967
4968 /* This function is called once per thread. We check the thread's resume
4969 request, which will tell us whether to resume, step, or leave the thread
4970 stopped; and what signal, if any, it should be sent.
4971
4972 For threads which we aren't explicitly told otherwise, we preserve
4973 the stepping flag; this is used for stepping over gdbserver-placed
4974 breakpoints.
4975
4976 If pending_flags was set in any thread, we queue any needed
4977 signals, since we won't actually resume. We already have a pending
4978 event to report, so we don't need to preserve any step requests;
4979 they should be re-issued if necessary. */
4980
4981 static int
4982 linux_resume_one_thread (thread_info *thread, void *arg)
4983 {
4984 struct lwp_info *lwp = get_thread_lwp (thread);
4985 int leave_all_stopped = * (int *) arg;
4986 int leave_pending;
4987
4988 if (lwp->resume == NULL)
4989 return 0;
4990
4991 if (lwp->resume->kind == resume_stop)
4992 {
4993 if (debug_threads)
4994 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4995
4996 if (!lwp->stopped)
4997 {
4998 if (debug_threads)
4999 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
5000
5001 /* Stop the thread, and wait for the event asynchronously,
5002 through the event loop. */
5003 send_sigstop (lwp);
5004 }
5005 else
5006 {
5007 if (debug_threads)
5008 debug_printf ("already stopped LWP %ld\n",
5009 lwpid_of (thread));
5010
5011 /* The LWP may have been stopped in an internal event that
5012 was not meant to be notified back to GDB (e.g., gdbserver
5013 breakpoint), so we should be reporting a stop event in
5014 this case too. */
5015
5016 /* If the thread already has a pending SIGSTOP, this is a
5017 no-op. Otherwise, something later will presumably resume
5018 the thread and this will cause it to cancel any pending
5019 operation, due to last_resume_kind == resume_stop. If
5020 the thread already has a pending status to report, we
5021 will still report it the next time we wait - see
5022 status_pending_p_callback. */
5023
5024 /* If we already have a pending signal to report, then
5025 there's no need to queue a SIGSTOP, as this means we're
5026 midway through moving the LWP out of the jumppad, and we
5027 will report the pending signal as soon as that is
5028 finished. */
5029 if (lwp->pending_signals_to_report == NULL)
5030 send_sigstop (lwp);
5031 }
5032
5033 /* For stop requests, we're done. */
5034 lwp->resume = NULL;
5035 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5036 return 0;
5037 }
5038
5039 /* If this thread which is about to be resumed has a pending status,
5040 then don't resume it - we can just report the pending status.
5041 Likewise if it is suspended, because e.g., another thread is
5042 stepping past a breakpoint. Make sure to queue any signals that
5043 would otherwise be sent. In all-stop mode, we make this decision
5044 based on whether *any* thread has a pending status. If there's a
5045 thread that needs the step-over-breakpoint dance, then don't
5046 resume any other thread but that particular one. */
5047 leave_pending = (lwp->suspended
5048 || lwp->status_pending_p
5049 || leave_all_stopped);
5050
5051 /* If we have a new signal, enqueue the signal. */
5052 if (lwp->resume->sig != 0)
5053 {
5054 siginfo_t info, *info_p;
5055
5056 /* If this is the same signal we were previously stopped by,
5057 make sure to queue its siginfo. */
5058 if (WIFSTOPPED (lwp->last_status)
5059 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5060 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5061 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5062 info_p = &info;
5063 else
5064 info_p = NULL;
5065
5066 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5067 }
5068
5069 if (!leave_pending)
5070 {
5071 if (debug_threads)
5072 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5073
5074 proceed_one_lwp (thread, NULL);
5075 }
5076 else
5077 {
5078 if (debug_threads)
5079 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5080 }
5081
5082 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5083 lwp->resume = NULL;
5084 return 0;
5085 }
5086
5087 static void
5088 linux_resume (struct thread_resume *resume_info, size_t n)
5089 {
5090 struct thread_info *need_step_over = NULL;
5091 int leave_all_stopped;
5092
5093 if (debug_threads)
5094 {
5095 debug_enter ();
5096 debug_printf ("linux_resume:\n");
5097 }
5098
5099 for_each_thread ([&] (thread_info *thread)
5100 {
5101 linux_set_resume_request (thread, resume_info, n);
5102 });
5103
5104 /* If there is a thread which would otherwise be resumed, which has
5105 a pending status, then don't resume any threads - we can just
5106 report the pending status. Make sure to queue any signals that
5107 would otherwise be sent. In non-stop mode, we'll apply this
5108 logic to each thread individually. We consume all pending events
5109 before considering starting a step-over (in all-stop). */
5110 bool any_pending = false;
5111 if (!non_stop)
5112 any_pending = find_thread (resume_status_pending_p) != NULL;
5113
5114 /* If there is a thread which would otherwise be resumed, which is
5115 stopped at a breakpoint that needs stepping over, then don't
5116 resume any threads - have it step over the breakpoint with all
5117 other threads stopped, then resume all threads again. Make sure
5118 to queue any signals that would otherwise be delivered or
5119 queued. */
5120 if (!any_pending && supports_breakpoints ())
5121 need_step_over = find_thread (need_step_over_p);
5122
5123 leave_all_stopped = (need_step_over != NULL || any_pending);
5124
5125 if (debug_threads)
5126 {
5127 if (need_step_over != NULL)
5128 debug_printf ("Not resuming all, need step over\n");
5129 else if (any_pending)
5130 debug_printf ("Not resuming, all-stop and found "
5131 "an LWP with pending status\n");
5132 else
5133 debug_printf ("Resuming, no pending status or step over needed\n");
5134 }
5135
5136 /* Even if we're leaving threads stopped, queue all signals we'd
5137 otherwise deliver. */
5138 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5139
5140 if (need_step_over)
5141 start_step_over (get_thread_lwp (need_step_over));
5142
5143 if (debug_threads)
5144 {
5145 debug_printf ("linux_resume done\n");
5146 debug_exit ();
5147 }
5148
5149 /* We may have events that were pending that can/should be sent to
5150 the client now. Trigger a linux_wait call. */
5151 if (target_is_async_p ())
5152 async_file_mark ();
5153 }
5154
5155 /* This function is called once per thread. We check the thread's
5156 last resume request, which will tell us whether to resume, step, or
5157 leave the thread stopped. Any signal the client requested to be
5158 delivered has already been enqueued at this point.
5159
5160 If any thread that GDB wants running is stopped at an internal
5161 breakpoint that needs stepping over, we start a step-over operation
5162 on that particular thread, and leave all others stopped. */
5163
5164 static int
5165 proceed_one_lwp (thread_info *thread, void *except)
5166 {
5167 struct lwp_info *lwp = get_thread_lwp (thread);
5168 int step;
5169
5170 if (lwp == except)
5171 return 0;
5172
5173 if (debug_threads)
5174 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5175
5176 if (!lwp->stopped)
5177 {
5178 if (debug_threads)
5179 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5180 return 0;
5181 }
5182
5183 if (thread->last_resume_kind == resume_stop
5184 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5185 {
5186 if (debug_threads)
5187 debug_printf (" client wants LWP to remain %ld stopped\n",
5188 lwpid_of (thread));
5189 return 0;
5190 }
5191
5192 if (lwp->status_pending_p)
5193 {
5194 if (debug_threads)
5195 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5196 lwpid_of (thread));
5197 return 0;
5198 }
5199
5200 gdb_assert (lwp->suspended >= 0);
5201
5202 if (lwp->suspended)
5203 {
5204 if (debug_threads)
5205 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5206 return 0;
5207 }
5208
5209 if (thread->last_resume_kind == resume_stop
5210 && lwp->pending_signals_to_report == NULL
5211 && (lwp->collecting_fast_tracepoint
5212 == fast_tpoint_collect_result::not_collecting))
5213 {
5214 /* We haven't reported this LWP as stopped yet (otherwise, the
5215 last_status.kind check above would catch it, and we wouldn't
5216 reach here). This LWP may have been momentarily paused by a
5217 stop_all_lwps call while handling, for example, another LWP's
5218 step-over. In that case, the pending expected SIGSTOP signal
5219 that was queued at vCont;t handling time will have already
5220 been consumed by wait_for_sigstop, and so we need to requeue
5221 another one here. Note that if the LWP already has a SIGSTOP
5222 pending, this is a no-op. */
5223
5224 if (debug_threads)
5225 debug_printf ("Client wants LWP %ld to stop. "
5226 "Making sure it has a SIGSTOP pending\n",
5227 lwpid_of (thread));
5228
5229 send_sigstop (lwp);
5230 }
5231
5232 if (thread->last_resume_kind == resume_step)
5233 {
5234 if (debug_threads)
5235 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5236 lwpid_of (thread));
5237
5238 /* If resume_step is requested by GDB, install single-step
5239 breakpoints when the thread is about to be actually resumed if
5240 the single-step breakpoints weren't removed. */
5241 if (can_software_single_step ()
5242 && !has_single_step_breakpoints (thread))
5243 install_software_single_step_breakpoints (lwp);
5244
5245 step = maybe_hw_step (thread);
5246 }
5247 else if (lwp->bp_reinsert != 0)
5248 {
5249 if (debug_threads)
5250 debug_printf (" stepping LWP %ld, reinsert set\n",
5251 lwpid_of (thread));
5252
5253 step = maybe_hw_step (thread);
5254 }
5255 else
5256 step = 0;
5257
5258 linux_resume_one_lwp (lwp, step, 0, NULL);
5259 return 0;
5260 }
5261
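/* find_inferior callback: decrement the suspend count of THREAD's
   LWP, then proceed it as proceed_one_lwp does, skipping EXCEPT. */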
5262 static int
5263 unsuspend_and_proceed_one_lwp (thread_info *thread, void *except)
5264 {
5265 struct lwp_info *lwp = get_thread_lwp (thread);
5266
5267 if (lwp == except)
5268 return 0;
5269
5270 lwp_suspended_decr (lwp);
5271
5272 return proceed_one_lwp (thread, except);
5273 }
5274
5275 /* When we finish a step-over, set threads running again. If there's
5276 another thread that may need a step-over, now's the time to start
5277 it. Eventually, we'll move all threads past their breakpoints. */
5278
5279 static void
5280 proceed_all_lwps (void)
5281 {
5282 struct thread_info *need_step_over;
5283
5284 /* If there is a thread which would otherwise be resumed, which is
5285 stopped at a breakpoint that needs stepping over, then don't
5286 resume any threads - have it step over the breakpoint with all
5287 other threads stopped, then resume all threads again. */
5288
5289 if (supports_breakpoints ())
5290 {
5291 need_step_over = find_thread (need_step_over_p);
5292
5293 if (need_step_over != NULL)
5294 {
5295 if (debug_threads)
5296 debug_printf ("proceed_all_lwps: found "
5297 "thread %ld needing a step-over\n",
5298 lwpid_of (need_step_over));
5299
5300 start_step_over (get_thread_lwp (need_step_over));
5301 return;
5302 }
5303 }
5304
5305 if (debug_threads)
5306 debug_printf ("Proceeding, no step-over needed\n");
5307
5308 find_inferior (&all_threads, proceed_one_lwp, NULL);
5309 }
5310
5311 /* Stopped LWPs that the client wanted to be running, and that don't
5312 have pending statuses, are set to run again, except for EXCEPT if
5313 not NULL. This undoes a stop_all_lwps call. */
5314
5315 static void
5316 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5317 {
5318 if (debug_threads)
5319 {
5320 debug_enter ();
5321 if (except)
5322 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5323 lwpid_of (get_lwp_thread (except)));
5324 else
5325 debug_printf ("unstopping all lwps\n");
5326 }
5327
5328 if (unsuspend)
5329 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5330 else
5331 find_inferior (&all_threads, proceed_one_lwp, except);
5332
5333 if (debug_threads)
5334 {
5335 debug_printf ("unstop_all_lwps done\n");
5336 debug_exit ();
5337 }
5338 }
5339
5340
5341 #ifdef HAVE_LINUX_REGSETS
5342
5343 #define use_linux_regsets 1
5344
5345 /* Returns true if REGSET has been disabled. */
5346
5347 static int
5348 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5349 {
5350 return (info->disabled_regsets != NULL
5351 && info->disabled_regsets[regset - info->regsets]);
5352 }
5353
5354 /* Disable REGSET. */
5355
5356 static void
5357 disable_regset (struct regsets_info *info, struct regset_info *regset)
5358 {
5359 int dr_offset;
5360
5361 dr_offset = regset - info->regsets;
5362 if (info->disabled_regsets == NULL)
5363 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5364 info->disabled_regsets[dr_offset] = 1;
5365 }
5366
5367 static int
5368 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5369 struct regcache *regcache)
5370 {
5371 struct regset_info *regset;
5372 int saw_general_regs = 0;
5373 int pid;
5374 struct iovec iov;
5375
5376 pid = lwpid_of (current_thread);
5377 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5378 {
5379 void *buf, *data;
5380 int nt_type, res;
5381
5382 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5383 continue;
5384
5385 buf = xmalloc (regset->size);
5386
5387 nt_type = regset->nt_type;
5388 if (nt_type)
5389 {
5390 iov.iov_base = buf;
5391 iov.iov_len = regset->size;
5392 data = (void *) &iov;
5393 }
5394 else
5395 data = buf;
5396
5397 #ifndef __sparc__
5398 res = ptrace (regset->get_request, pid,
5399 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5400 #else
5401 res = ptrace (regset->get_request, pid, data, nt_type);
5402 #endif
5403 if (res < 0)
5404 {
5405 if (errno == EIO)
5406 {
5407 /* If we get EIO on a regset, do not try it again for
5408 this process mode. */
5409 disable_regset (regsets_info, regset);
5410 }
5411 else if (errno == ENODATA)
5412 {
5413 /* ENODATA may be returned if the regset is currently
5414 not "active". This can happen in normal operation,
5415 so suppress the warning in this case. */
5416 }
5417 else if (errno == ESRCH)
5418 {
5419 /* At this point, ESRCH should mean the process is
5420 already gone, in which case we simply ignore attempts
5421 to read its registers. */
5422 }
5423 else
5424 {
5425 char s[256];
5426 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5427 pid);
5428 perror (s);
5429 }
5430 }
5431 else
5432 {
5433 if (regset->type == GENERAL_REGS)
5434 saw_general_regs = 1;
5435 regset->store_function (regcache, buf);
5436 }
5437 free (buf);
5438 }
5439 if (saw_general_regs)
5440 return 0;
5441 else
5442 return 1;
5443 }
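
/* For regsets with a non-zero NT_TYPE, GET_REQUEST is
   PTRACE_GETREGSET, whose third argument is the note type and whose
   fourth is a struct iovec describing the buffer (the kernel trims
   iov_len to what it actually filled in). A sketch for the
   general-purpose registers, with a hypothetical pid and buffer:

     unsigned char buf[sizeof (elf_gregset_t)];
     struct iovec iov = { buf, sizeof buf };

     ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
*/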
5444
5445 static int
5446 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5447 struct regcache *regcache)
5448 {
5449 struct regset_info *regset;
5450 int saw_general_regs = 0;
5451 int pid;
5452 struct iovec iov;
5453
5454 pid = lwpid_of (current_thread);
5455 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5456 {
5457 void *buf, *data;
5458 int nt_type, res;
5459
5460 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5461 || regset->fill_function == NULL)
5462 continue;
5463
5464 buf = xmalloc (regset->size);
5465
5466 /* First fill the buffer with the current register set contents,
5467 in case there are any items in the kernel's regset that are
5468 not in gdbserver's regcache. */
5469
5470 nt_type = regset->nt_type;
5471 if (nt_type)
5472 {
5473 iov.iov_base = buf;
5474 iov.iov_len = regset->size;
5475 data = (void *) &iov;
5476 }
5477 else
5478 data = buf;
5479
5480 #ifndef __sparc__
5481 res = ptrace (regset->get_request, pid,
5482 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5483 #else
5484 res = ptrace (regset->get_request, pid, data, nt_type);
5485 #endif
5486
5487 if (res == 0)
5488 {
5489 /* Then overlay our cached registers on that. */
5490 regset->fill_function (regcache, buf);
5491
5492 /* Only now do we write the register set. */
5493 #ifndef __sparc__
5494 res = ptrace (regset->set_request, pid,
5495 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5496 #else
5497 res = ptrace (regset->set_request, pid, data, nt_type);
5498 #endif
5499 }
5500
5501 if (res < 0)
5502 {
5503 if (errno == EIO)
5504 {
5505 /* If we get EIO on a regset, do not try it again for
5506 this process mode. */
5507 disable_regset (regsets_info, regset);
5508 }
5509 else if (errno == ESRCH)
5510 {
5511 /* At this point, ESRCH should mean the process is
5512 already gone, in which case we simply ignore attempts
5513 to change its registers. See also the related
5514 comment in linux_resume_one_lwp. */
5515 free (buf);
5516 return 0;
5517 }
5518 else
5519 {
5520 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5521 }
5522 }
5523 else if (regset->type == GENERAL_REGS)
5524 saw_general_regs = 1;
5525 free (buf);
5526 }
5527 if (saw_general_regs)
5528 return 0;
5529 else
5530 return 1;
5531 }
5532
5533 #else /* !HAVE_LINUX_REGSETS */
5534
5535 #define use_linux_regsets 0
5536 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5537 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5538
5539 #endif
5540
5541 /* Return 1 if register REGNO is supported by one of the regset ptrace
5542 calls or 0 if it has to be transferred individually. */
5543
5544 static int
5545 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5546 {
5547 unsigned char mask = 1 << (regno % 8);
5548 size_t index = regno / 8;
5549
5550 return (use_linux_regsets
5551 && (regs_info->regset_bitmap == NULL
5552 || (regs_info->regset_bitmap[index] & mask) != 0));
5553 }
5554
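/* Illustrative sketch, not part of this file: the byte-granular bitmap
   test above, shown standalone with a matching setter.  The names
   regset_bitmap_set and regset_bitmap_test are hypothetical.  As in
   linux_register_in_regsets, a NULL bitmap means "assume every register
   is covered by some regset".  */
#if 0
#include <stddef.h>

static void
regset_bitmap_set (unsigned char *bitmap, int regno)
{
  bitmap[regno / 8] |= (unsigned char) (1 << (regno % 8));
}

static int
regset_bitmap_test (const unsigned char *bitmap, int regno)
{
  if (bitmap == NULL)
    return 1;
  return (bitmap[regno / 8] & (1 << (regno % 8))) != 0;
}
#endif
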
5555 #ifdef HAVE_LINUX_USRREGS
5556
5557 static int
5558 register_addr (const struct usrregs_info *usrregs, int regnum)
5559 {
5560 int addr;
5561
5562 if (regnum < 0 || regnum >= usrregs->num_regs)
5563 error ("Invalid register number %d.", regnum);
5564
5565 addr = usrregs->regmap[regnum];
5566
5567 return addr;
5568 }
5569
5570 /* Fetch one register. */
5571 static void
5572 fetch_register (const struct usrregs_info *usrregs,
5573 struct regcache *regcache, int regno)
5574 {
5575 CORE_ADDR regaddr;
5576 int i, size;
5577 char *buf;
5578 int pid;
5579
5580 if (regno >= usrregs->num_regs)
5581 return;
5582 if ((*the_low_target.cannot_fetch_register) (regno))
5583 return;
5584
5585 regaddr = register_addr (usrregs, regno);
5586 if (regaddr == -1)
5587 return;
5588
5589 size = ((register_size (regcache->tdesc, regno)
5590 + sizeof (PTRACE_XFER_TYPE) - 1)
5591 & -sizeof (PTRACE_XFER_TYPE));
5592 buf = (char *) alloca (size);
5593
5594 pid = lwpid_of (current_thread);
5595 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5596 {
5597 errno = 0;
5598 *(PTRACE_XFER_TYPE *) (buf + i) =
5599 ptrace (PTRACE_PEEKUSER, pid,
5600 /* Coerce to a uintptr_t first to avoid potential gcc warning
5601 of coercing an 8 byte integer to a 4 byte pointer. */
5602 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5603 regaddr += sizeof (PTRACE_XFER_TYPE);
5604 if (errno != 0)
5605 error ("reading register %d: %s", regno, strerror (errno));
5606 }
5607
5608 if (the_low_target.supply_ptrace_register)
5609 the_low_target.supply_ptrace_register (regcache, regno, buf);
5610 else
5611 supply_register (regcache, regno, buf);
5612 }
5613
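/* Illustrative sketch, not part of this file: the single-word
   PTRACE_PEEKUSER primitive that the loop above builds on.  The name
   peek_user_word is hypothetical; PID must be a stopped, ptrace-attached
   tracee and OFFSET must be word-aligned.  Because PTRACE_PEEKUSER
   returns the data in its return value, -1 is ambiguous and errno must
   be checked instead, just as fetch_register does above.  */
#if 0
#include <errno.h>
#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
peek_user_word (pid_t pid, size_t offset, long *value)
{
  errno = 0;
  *value = ptrace (PTRACE_PEEKUSER, pid, (void *) offset, NULL);
  return errno != 0 ? -1 : 0;
}
#endif
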
5614 /* Store one register. */
5615 static void
5616 store_register (const struct usrregs_info *usrregs,
5617 struct regcache *regcache, int regno)
5618 {
5619 CORE_ADDR regaddr;
5620 int i, size;
5621 char *buf;
5622 int pid;
5623
5624 if (regno >= usrregs->num_regs)
5625 return;
5626 if ((*the_low_target.cannot_store_register) (regno))
5627 return;
5628
5629 regaddr = register_addr (usrregs, regno);
5630 if (regaddr == -1)
5631 return;
5632
5633 size = ((register_size (regcache->tdesc, regno)
5634 + sizeof (PTRACE_XFER_TYPE) - 1)
5635 & -sizeof (PTRACE_XFER_TYPE));
5636 buf = (char *) alloca (size);
5637 memset (buf, 0, size);
5638
5639 if (the_low_target.collect_ptrace_register)
5640 the_low_target.collect_ptrace_register (regcache, regno, buf);
5641 else
5642 collect_register (regcache, regno, buf);
5643
5644 pid = lwpid_of (current_thread);
5645 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5646 {
5647 errno = 0;
5648 ptrace (PTRACE_POKEUSER, pid,
5649 /* Coerce to a uintptr_t first to avoid potential gcc warning
5650 about coercing an 8 byte integer to a 4 byte pointer. */
5651 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5652 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5653 if (errno != 0)
5654 {
5655 /* At this point, ESRCH should mean the process is
5656 already gone, in which case we simply ignore attempts
5657 to change its registers. See also the related
5658 comment in linux_resume_one_lwp. */
5659 if (errno == ESRCH)
5660 return;
5661
5662 if ((*the_low_target.cannot_store_register) (regno) == 0)
5663 error ("writing register %d: %s", regno, strerror (errno));
5664 }
5665 regaddr += sizeof (PTRACE_XFER_TYPE);
5666 }
5667 }
5668
5669 /* Fetch all registers, or just one, from the child process.
5670 If REGNO is -1, do this for all registers, skipping any that are
5671 assumed to have been retrieved by regsets_fetch_inferior_registers,
5672 unless ALL is non-zero.
5673 Otherwise, REGNO specifies which register (so we can save time). */
5674 static void
5675 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5676 struct regcache *regcache, int regno, int all)
5677 {
5678 struct usrregs_info *usr = regs_info->usrregs;
5679
5680 if (regno == -1)
5681 {
5682 for (regno = 0; regno < usr->num_regs; regno++)
5683 if (all || !linux_register_in_regsets (regs_info, regno))
5684 fetch_register (usr, regcache, regno);
5685 }
5686 else
5687 fetch_register (usr, regcache, regno);
5688 }
5689
5690 /* Store our register values back into the inferior.
5691 If REGNO is -1, do this for all registers, skipping any that are
5692 assumed to have been saved by regsets_store_inferior_registers,
5693 unless ALL is non-zero.
5694 Otherwise, REGNO specifies which register (so we can save time). */
5695 static void
5696 usr_store_inferior_registers (const struct regs_info *regs_info,
5697 struct regcache *regcache, int regno, int all)
5698 {
5699 struct usrregs_info *usr = regs_info->usrregs;
5700
5701 if (regno == -1)
5702 {
5703 for (regno = 0; regno < usr->num_regs; regno++)
5704 if (all || !linux_register_in_regsets (regs_info, regno))
5705 store_register (usr, regcache, regno);
5706 }
5707 else
5708 store_register (usr, regcache, regno);
5709 }
5710
5711 #else /* !HAVE_LINUX_USRREGS */
5712
5713 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5714 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5715
5716 #endif
5717
5718
5719 static void
5720 linux_fetch_registers (struct regcache *regcache, int regno)
5721 {
5722 int use_regsets;
5723 int all = 0;
5724 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5725
5726 if (regno == -1)
5727 {
5728 if (the_low_target.fetch_register != NULL
5729 && regs_info->usrregs != NULL)
5730 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5731 (*the_low_target.fetch_register) (regcache, regno);
5732
5733 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5734 if (regs_info->usrregs != NULL)
5735 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5736 }
5737 else
5738 {
5739 if (the_low_target.fetch_register != NULL
5740 && (*the_low_target.fetch_register) (regcache, regno))
5741 return;
5742
5743 use_regsets = linux_register_in_regsets (regs_info, regno);
5744 if (use_regsets)
5745 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5746 regcache);
5747 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5748 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5749 }
5750 }
5751
5752 static void
5753 linux_store_registers (struct regcache *regcache, int regno)
5754 {
5755 int use_regsets;
5756 int all = 0;
5757 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5758
5759 if (regno == -1)
5760 {
5761 all = regsets_store_inferior_registers (regs_info->regsets_info,
5762 regcache);
5763 if (regs_info->usrregs != NULL)
5764 usr_store_inferior_registers (regs_info, regcache, regno, all);
5765 }
5766 else
5767 {
5768 use_regsets = linux_register_in_regsets (regs_info, regno);
5769 if (use_regsets)
5770 all = regsets_store_inferior_registers (regs_info->regsets_info,
5771 regcache);
5772 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5773 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5774 }
5775 }
5776
5777
5778 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5779 to debugger memory starting at MYADDR. */
5780
5781 static int
5782 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5783 {
5784 int pid = lwpid_of (current_thread);
5785 PTRACE_XFER_TYPE *buffer;
5786 CORE_ADDR addr;
5787 int count;
5788 char filename[64];
5789 int i;
5790 int ret;
5791 int fd;
5792
5793 /* Try using /proc. Don't bother for one word. */
5794 if (len >= 3 * sizeof (long))
5795 {
5796 int bytes;
5797
5798 /* We could keep this file open and cache it - possibly one per
5799 thread. That requires some juggling, but is even faster. */
5800 sprintf (filename, "/proc/%d/mem", pid);
5801 fd = open (filename, O_RDONLY | O_LARGEFILE);
5802 if (fd == -1)
5803 goto no_proc;
5804
5805 /* If pread64 is available, use it. It's faster if the kernel
5806 supports it (only one syscall), and it's 64-bit safe even on
5807 32-bit platforms (for instance, SPARC debugging a SPARC64
5808 application). */
5809 #ifdef HAVE_PREAD64
5810 bytes = pread64 (fd, myaddr, len, memaddr);
5811 #else
5812 bytes = -1;
5813 if (lseek (fd, memaddr, SEEK_SET) != -1)
5814 bytes = read (fd, myaddr, len);
5815 #endif
5816
5817 close (fd);
5818 if (bytes == len)
5819 return 0;
5820
5821 /* Some data was read, we'll try to get the rest with ptrace. */
5822 if (bytes > 0)
5823 {
5824 memaddr += bytes;
5825 myaddr += bytes;
5826 len -= bytes;
5827 }
5828 }
5829
5830 no_proc:
5831 /* Round starting address down to longword boundary. */
5832 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5833 /* Round ending address up; get number of longwords that makes. */
5834 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5835 / sizeof (PTRACE_XFER_TYPE));
5836 /* Allocate buffer of that many longwords. */
5837 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5838
5839 /* Read all the longwords */
5840 errno = 0;
5841 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5842 {
5843 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5844 about coercing an 8 byte integer to a 4 byte pointer. */
5845 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5846 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5847 (PTRACE_TYPE_ARG4) 0);
5848 if (errno)
5849 break;
5850 }
5851 ret = errno;
5852
5853 /* Copy appropriate bytes out of the buffer. */
5854 if (i > 0)
5855 {
5856 i *= sizeof (PTRACE_XFER_TYPE);
5857 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5858 memcpy (myaddr,
5859 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5860 i < len ? i : len);
5861 }
5862
5863 return ret;
5864 }
5865
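/* Illustrative sketch, not part of this file: the /proc/PID/mem fast
   path above, standalone.  The name read_inferior_mem is hypothetical.
   Plain pread is used where the code above prefers pread64; the caller
   must already be ptrace-attached to PID (or otherwise pass the kernel's
   access check).  Returns the number of bytes read, or -1 on error.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_inferior_mem (pid_t pid, off_t addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t bytes;

  snprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  /* A single syscall, with no shared file offset to juggle.  */
  bytes = pread (fd, buf, len, addr);
  close (fd);
  return bytes;
}
#endif
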
5866 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5867 memory at MEMADDR. On failure (cannot write to the inferior)
5868 returns the value of errno. Always succeeds if LEN is zero. */
5869
5870 static int
5871 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5872 {
5873 int i;
5874 /* Round starting address down to longword boundary. */
5875 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5876 /* Round ending address up; get number of longwords that makes. */
5877 int count
5878 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5879 / sizeof (PTRACE_XFER_TYPE);
5880
5881 /* Allocate buffer of that many longwords. */
5882 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5883
5884 int pid = lwpid_of (current_thread);
5885
5886 if (len == 0)
5887 {
5888 /* Zero length write always succeeds. */
5889 return 0;
5890 }
5891
5892 if (debug_threads)
5893 {
5894 /* Dump up to four bytes. */
5895 char str[4 * 2 + 1];
5896 char *p = str;
5897 int dump = len < 4 ? len : 4;
5898
5899 for (i = 0; i < dump; i++)
5900 {
5901 sprintf (p, "%02x", myaddr[i]);
5902 p += 2;
5903 }
5904 *p = '\0';
5905
5906 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5907 str, (long) memaddr, pid);
5908 }
5909
5910 /* Fill start and end extra bytes of buffer with existing memory data. */
5911
5912 errno = 0;
5913 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5914 about coercing an 8 byte integer to a 4 byte pointer. */
5915 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5916 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5917 (PTRACE_TYPE_ARG4) 0);
5918 if (errno)
5919 return errno;
5920
5921 if (count > 1)
5922 {
5923 errno = 0;
5924 buffer[count - 1]
5925 = ptrace (PTRACE_PEEKTEXT, pid,
5926 /* Coerce to a uintptr_t first to avoid potential gcc warning
5927 about coercing an 8 byte integer to a 4 byte pointer. */
5928 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5929 * sizeof (PTRACE_XFER_TYPE)),
5930 (PTRACE_TYPE_ARG4) 0);
5931 if (errno)
5932 return errno;
5933 }
5934
5935 /* Copy data to be written over corresponding part of buffer. */
5936
5937 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5938 myaddr, len);
5939
5940 /* Write the entire buffer. */
5941
5942 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5943 {
5944 errno = 0;
5945 ptrace (PTRACE_POKETEXT, pid,
5946 /* Coerce to a uintptr_t first to avoid potential gcc warning
5947 about coercing an 8 byte integer to a 4 byte pointer. */
5948 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5949 (PTRACE_TYPE_ARG4) buffer[i]);
5950 if (errno)
5951 return errno;
5952 }
5953
5954 return 0;
5955 }
5956
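/* Illustrative sketch, not part of this file: the read-modify-write that
   a sub-word store needs with the word-sized PTRACE_POKETEXT, mirroring
   the buffer-edge handling above.  The name poke_text_byte is
   hypothetical; PID must be a stopped, ptrace-attached tracee.  Returns
   0 on success or an errno value.  */
#if 0
#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
poke_text_byte (pid_t pid, uintptr_t addr, unsigned char byte)
{
  uintptr_t aligned = addr & ~(uintptr_t) (sizeof (long) - 1);
  long word;

  errno = 0;
  word = ptrace (PTRACE_PEEKTEXT, pid, (void *) aligned, NULL);
  if (errno != 0)
    return errno;

  /* Overlay the byte at its offset within the word, then write the
     whole word back.  */
  ((unsigned char *) &word)[addr - aligned] = byte;
  if (ptrace (PTRACE_POKETEXT, pid, (void *) aligned, (void *) word) != 0)
    return errno;
  return 0;
}
#endif
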
5957 static void
5958 linux_look_up_symbols (void)
5959 {
5960 #ifdef USE_THREAD_DB
5961 struct process_info *proc = current_process ();
5962
5963 if (proc->priv->thread_db != NULL)
5964 return;
5965
5966 thread_db_init ();
5967 #endif
5968 }
5969
5970 static void
5971 linux_request_interrupt (void)
5972 {
5973 /* Send a SIGINT to the process group. This acts just like the user
5974 typed a ^C on the controlling terminal. */
5975 kill (-signal_pid, SIGINT);
5976 }
5977
5978 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5979 to debugger memory starting at MYADDR. */
5980
5981 static int
5982 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5983 {
5984 char filename[PATH_MAX];
5985 int fd, n;
5986 int pid = lwpid_of (current_thread);
5987
5988 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5989
5990 fd = open (filename, O_RDONLY);
5991 if (fd < 0)
5992 return -1;
5993
5994 if (offset != (CORE_ADDR) 0
5995 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5996 n = -1;
5997 else
5998 n = read (fd, myaddr, len);
5999
6000 close (fd);
6001
6002 return n;
6003 }
6004
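/* Illustrative sketch, not part of this file: consuming the auxiliary
   vector that linux_read_auxv exposes, here for the current process in
   the host's native layout.  The ElfW macro from <link.h> selects
   Elf32_auxv_t or Elf64_auxv_t to match the host; a cross debugger, like
   the code elsewhere in this file, must pick the inferior's layout
   explicitly.  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <link.h>
#include <stdio.h>
#include <unistd.h>

static void
dump_auxv (void)
{
  ElfW(auxv_t) entry;
  int fd = open ("/proc/self/auxv", O_RDONLY);

  if (fd < 0)
    return;
  while (read (fd, &entry, sizeof entry) == sizeof entry
	 && entry.a_type != AT_NULL)
    printf ("type %lu value 0x%lx\n",
	    (unsigned long) entry.a_type,
	    (unsigned long) entry.a_un.a_val);
  close (fd);
}
#endif
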
6005 /* These breakpoint and watchpoint related wrapper functions simply
6006 pass on the function call if the target has registered a
6007 corresponding function. */
6008
6009 static int
6010 linux_supports_z_point_type (char z_type)
6011 {
6012 return (the_low_target.supports_z_point_type != NULL
6013 && the_low_target.supports_z_point_type (z_type));
6014 }
6015
6016 static int
6017 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6018 int size, struct raw_breakpoint *bp)
6019 {
6020 if (type == raw_bkpt_type_sw)
6021 return insert_memory_breakpoint (bp);
6022 else if (the_low_target.insert_point != NULL)
6023 return the_low_target.insert_point (type, addr, size, bp);
6024 else
6025 /* Unsupported (see target.h). */
6026 return 1;
6027 }
6028
6029 static int
6030 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6031 int size, struct raw_breakpoint *bp)
6032 {
6033 if (type == raw_bkpt_type_sw)
6034 return remove_memory_breakpoint (bp);
6035 else if (the_low_target.remove_point != NULL)
6036 return the_low_target.remove_point (type, addr, size, bp);
6037 else
6038 /* Unsupported (see target.h). */
6039 return 1;
6040 }
6041
6042 /* Implement the to_stopped_by_sw_breakpoint target_ops
6043 method. */
6044
6045 static int
6046 linux_stopped_by_sw_breakpoint (void)
6047 {
6048 struct lwp_info *lwp = get_thread_lwp (current_thread);
6049
6050 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6051 }
6052
6053 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6054 method. */
6055
6056 static int
6057 linux_supports_stopped_by_sw_breakpoint (void)
6058 {
6059 return USE_SIGTRAP_SIGINFO;
6060 }
6061
6062 /* Implement the to_stopped_by_hw_breakpoint target_ops
6063 method. */
6064
6065 static int
6066 linux_stopped_by_hw_breakpoint (void)
6067 {
6068 struct lwp_info *lwp = get_thread_lwp (current_thread);
6069
6070 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6071 }
6072
6073 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6074 method. */
6075
6076 static int
6077 linux_supports_stopped_by_hw_breakpoint (void)
6078 {
6079 return USE_SIGTRAP_SIGINFO;
6080 }
6081
6082 /* Implement the supports_hardware_single_step target_ops method. */
6083
6084 static int
6085 linux_supports_hardware_single_step (void)
6086 {
6087 return can_hardware_single_step ();
6088 }
6089
6090 static int
6091 linux_supports_software_single_step (void)
6092 {
6093 return can_software_single_step ();
6094 }
6095
6096 static int
6097 linux_stopped_by_watchpoint (void)
6098 {
6099 struct lwp_info *lwp = get_thread_lwp (current_thread);
6100
6101 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6102 }
6103
6104 static CORE_ADDR
6105 linux_stopped_data_address (void)
6106 {
6107 struct lwp_info *lwp = get_thread_lwp (current_thread);
6108
6109 return lwp->stopped_data_address;
6110 }
6111
6112 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6113 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6114 && defined(PT_TEXT_END_ADDR)
6115
6116 /* This is only used for targets that define PT_TEXT_ADDR,
6117 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6118 the target has different ways of acquiring this information, like
6119 loadmaps. */
6120
6121 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6122 to tell gdb about. */
6123
6124 static int
6125 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6126 {
6127 unsigned long text, text_end, data;
6128 int pid = lwpid_of (current_thread);
6129
6130 errno = 0;
6131
6132 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6133 (PTRACE_TYPE_ARG4) 0);
6134 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6135 (PTRACE_TYPE_ARG4) 0);
6136 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6137 (PTRACE_TYPE_ARG4) 0);
6138
6139 if (errno == 0)
6140 {
6141 /* Both text and data offsets produced at compile-time (and so
6142 used by gdb) are relative to the beginning of the program,
6143 with the data segment immediately following the text segment.
6144 However, the actual runtime layout in memory may put the data
6145 somewhere else, so when we send gdb a data base-address, we
6146 use the real data base address and subtract the compile-time
6147 data base-address from it (which is just the length of the
6148 text segment). BSS immediately follows data in both
6149 cases. */
6150 *text_p = text;
6151 *data_p = data - (text_end - text);
6152
6153 return 1;
6154 }
6155 return 0;
6156 }
6157 #endif
6158
6159 static int
6160 linux_qxfer_osdata (const char *annex,
6161 unsigned char *readbuf, unsigned const char *writebuf,
6162 CORE_ADDR offset, int len)
6163 {
6164 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6165 }
6166
6167 /* Convert a native/host siginfo object into/from the siginfo in the
6168 layout of the inferior's architecture. */
6169
6170 static void
6171 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6172 {
6173 int done = 0;
6174
6175 if (the_low_target.siginfo_fixup != NULL)
6176 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6177
6178 /* If there was no callback, or the callback didn't do anything,
6179 then just do a straight memcpy. */
6180 if (!done)
6181 {
6182 if (direction == 1)
6183 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6184 else
6185 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6186 }
6187 }
6188
6189 static int
6190 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6191 unsigned const char *writebuf, CORE_ADDR offset, int len)
6192 {
6193 int pid;
6194 siginfo_t siginfo;
6195 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6196
6197 if (current_thread == NULL)
6198 return -1;
6199
6200 pid = lwpid_of (current_thread);
6201
6202 if (debug_threads)
6203 debug_printf ("%s siginfo for lwp %d.\n",
6204 readbuf != NULL ? "Reading" : "Writing",
6205 pid);
6206
6207 if (offset >= sizeof (siginfo))
6208 return -1;
6209
6210 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6211 return -1;
6212
6213 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6214 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6215 inferior with a 64-bit GDBSERVER should look the same as debugging it
6216 with a 32-bit GDBSERVER, we need to convert it. */
6217 siginfo_fixup (&siginfo, inf_siginfo, 0);
6218
6219 if (offset + len > sizeof (siginfo))
6220 len = sizeof (siginfo) - offset;
6221
6222 if (readbuf != NULL)
6223 memcpy (readbuf, inf_siginfo + offset, len);
6224 else
6225 {
6226 memcpy (inf_siginfo + offset, writebuf, len);
6227
6228 /* Convert back to ptrace layout before flushing it out. */
6229 siginfo_fixup (&siginfo, inf_siginfo, 1);
6230
6231 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6232 return -1;
6233 }
6234
6235 return len;
6236 }
6237
6238 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6239 it lets us notice when children change state; and it acts as the
6240 handler for the sigsuspend in my_waitpid. */
6241
6242 static void
6243 sigchld_handler (int signo)
6244 {
6245 int old_errno = errno;
6246
6247 if (debug_threads)
6248 {
6249 do
6250 {
6251 /* fprintf is not async-signal-safe, so call write
6252 directly. */
6253 if (write (2, "sigchld_handler\n",
6254 sizeof ("sigchld_handler\n") - 1) < 0)
6255 break; /* just ignore */
6256 } while (0);
6257 }
6258
6259 if (target_is_async_p ())
6260 async_file_mark (); /* trigger a linux_wait */
6261
6262 errno = old_errno;
6263 }
6264
6265 static int
6266 linux_supports_non_stop (void)
6267 {
6268 return 1;
6269 }
6270
6271 static int
6272 linux_async (int enable)
6273 {
6274 int previous = target_is_async_p ();
6275
6276 if (debug_threads)
6277 debug_printf ("linux_async (%d), previous=%d\n",
6278 enable, previous);
6279
6280 if (previous != enable)
6281 {
6282 sigset_t mask;
6283 sigemptyset (&mask);
6284 sigaddset (&mask, SIGCHLD);
6285
6286 sigprocmask (SIG_BLOCK, &mask, NULL);
6287
6288 if (enable)
6289 {
6290 if (pipe (linux_event_pipe) == -1)
6291 {
6292 linux_event_pipe[0] = -1;
6293 linux_event_pipe[1] = -1;
6294 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6295
6296 warning ("creating event pipe failed.");
6297 return previous;
6298 }
6299
6300 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6301 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6302
6303 /* Register the event loop handler. */
6304 add_file_handler (linux_event_pipe[0],
6305 handle_target_event, NULL);
6306
6307 /* Always trigger a linux_wait. */
6308 async_file_mark ();
6309 }
6310 else
6311 {
6312 delete_file_handler (linux_event_pipe[0]);
6313
6314 close (linux_event_pipe[0]);
6315 close (linux_event_pipe[1]);
6316 linux_event_pipe[0] = -1;
6317 linux_event_pipe[1] = -1;
6318 }
6319
6320 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6321 }
6322
6323 return previous;
6324 }
6325
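/* Illustrative sketch, not part of this file: the self-pipe pattern that
   linux_async sets up, standalone.  An async-signal-safe handler writes
   one byte into a non-blocking pipe; the event loop then polls the read
   end instead of racing with signal delivery.  The names event_pipe,
   example_sigchld_handler and setup_event_pipe are hypothetical.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int event_pipe[2] = { -1, -1 };

static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;
  char c = 0;

  /* write is async-signal-safe.  If the pipe is already full the wakeup
     is dropped, which is fine: one pending byte is enough.  */
  (void) write (event_pipe[1], &c, 1);
  errno = saved_errno;
}

static int
setup_event_pipe (void)
{
  struct sigaction sa;

  if (pipe (event_pipe) == -1)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = example_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  return sigaction (SIGCHLD, &sa, NULL);
}
/* The event loop would select/poll on event_pipe[0] and drain it before
   calling waitpid, much as add_file_handler registers handle_target_event
   above.  */
#endif
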
6326 static int
6327 linux_start_non_stop (int nonstop)
6328 {
6329 /* Register or unregister from event-loop accordingly. */
6330 linux_async (nonstop);
6331
6332 if (target_is_async_p () != (nonstop != 0))
6333 return -1;
6334
6335 return 0;
6336 }
6337
6338 static int
6339 linux_supports_multi_process (void)
6340 {
6341 return 1;
6342 }
6343
6344 /* Check if fork events are supported. */
6345
6346 static int
6347 linux_supports_fork_events (void)
6348 {
6349 return linux_supports_tracefork ();
6350 }
6351
6352 /* Check if vfork events are supported. */
6353
6354 static int
6355 linux_supports_vfork_events (void)
6356 {
6357 return linux_supports_tracefork ();
6358 }
6359
6360 /* Check if exec events are supported. */
6361
6362 static int
6363 linux_supports_exec_events (void)
6364 {
6365 return linux_supports_traceexec ();
6366 }
6367
6368 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6369 ptrace flags for all inferiors. This is in case the new GDB connection
6370 doesn't support the same set of events that the previous one did. */
6371
6372 static void
6373 linux_handle_new_gdb_connection (void)
6374 {
6375 /* Request that all the lwps reset their ptrace options. */
6376 for_each_thread ([] (thread_info *thread)
6377 {
6378 struct lwp_info *lwp = get_thread_lwp (thread);
6379
6380 if (!lwp->stopped)
6381 {
6382 /* Stop the lwp so we can modify its ptrace options. */
6383 lwp->must_set_ptrace_flags = 1;
6384 linux_stop_lwp (lwp);
6385 }
6386 else
6387 {
6388 /* Already stopped; go ahead and set the ptrace options. */
6389 struct process_info *proc = find_process_pid (pid_of (thread));
6390 int options = linux_low_ptrace_options (proc->attached);
6391
6392 linux_enable_event_reporting (lwpid_of (thread), options);
6393 lwp->must_set_ptrace_flags = 0;
6394 }
6395 });
6396 }
6397
6398 static int
6399 linux_supports_disable_randomization (void)
6400 {
6401 #ifdef HAVE_PERSONALITY
6402 return 1;
6403 #else
6404 return 0;
6405 #endif
6406 }
6407
6408 static int
6409 linux_supports_agent (void)
6410 {
6411 return 1;
6412 }
6413
6414 static int
6415 linux_supports_range_stepping (void)
6416 {
6417 if (can_software_single_step ())
6418 return 1;
6419 if (the_low_target.supports_range_stepping == NULL)
6420 return 0;
6421
6422 return (*the_low_target.supports_range_stepping) ();
6423 }
6424
6425 /* Enumerate spufs IDs for process PID. */
6426 static int
6427 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6428 {
6429 int pos = 0;
6430 int written = 0;
6431 char path[128];
6432 DIR *dir;
6433 struct dirent *entry;
6434
6435 sprintf (path, "/proc/%ld/fd", pid);
6436 dir = opendir (path);
6437 if (!dir)
6438 return -1;
6439
6440 rewinddir (dir);
6441 while ((entry = readdir (dir)) != NULL)
6442 {
6443 struct stat st;
6444 struct statfs stfs;
6445 int fd;
6446
6447 fd = atoi (entry->d_name);
6448 if (!fd)
6449 continue;
6450
6451 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6452 if (stat (path, &st) != 0)
6453 continue;
6454 if (!S_ISDIR (st.st_mode))
6455 continue;
6456
6457 if (statfs (path, &stfs) != 0)
6458 continue;
6459 if (stfs.f_type != SPUFS_MAGIC)
6460 continue;
6461
6462 if (pos >= offset && pos + 4 <= offset + len)
6463 {
6464 *(unsigned int *)(buf + pos - offset) = fd;
6465 written += 4;
6466 }
6467 pos += 4;
6468 }
6469
6470 closedir (dir);
6471 return written;
6472 }
6473
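/* Illustrative sketch, not part of this file: the directory walk above
   reduced to its core, counting a process's open descriptors that live
   on a filesystem with a given statfs f_type magic (SPUFS_MAGIC in the
   code above).  The name count_fds_on_fs is hypothetical.  Like the loop
   above, atoi's 0 return skips "." and ".." and, incidentally, fd 0.  */
#if 0
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/vfs.h>

static int
count_fds_on_fs (long pid, long fs_magic)
{
  char path[128];
  DIR *dir;
  struct dirent *entry;
  int count = 0;

  snprintf (path, sizeof path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (dir == NULL)
    return -1;

  while ((entry = readdir (dir)) != NULL)
    {
      struct statfs stfs;
      int fd = atoi (entry->d_name);

      if (fd == 0)
	continue;
      snprintf (path, sizeof path, "/proc/%ld/fd/%d", pid, fd);
      if (statfs (path, &stfs) == 0 && stfs.f_type == fs_magic)
	count++;
    }

  closedir (dir);
  return count;
}
#endif
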
6474 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6475 object type, using the /proc file system. */
6476 static int
6477 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6478 unsigned const char *writebuf,
6479 CORE_ADDR offset, int len)
6480 {
6481 long pid = lwpid_of (current_thread);
6482 char buf[128];
6483 int fd = 0;
6484 int ret = 0;
6485
6486 if (!writebuf && !readbuf)
6487 return -1;
6488
6489 if (!*annex)
6490 {
6491 if (!readbuf)
6492 return -1;
6493 else
6494 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6495 }
6496
6497 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6498 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6499 if (fd <= 0)
6500 return -1;
6501
6502 if (offset != 0
6503 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6504 {
6505 close (fd);
6506 return 0;
6507 }
6508
6509 if (writebuf)
6510 ret = write (fd, writebuf, (size_t) len);
6511 else
6512 ret = read (fd, readbuf, (size_t) len);
6513
6514 close (fd);
6515 return ret;
6516 }
6517
6518 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6519 struct target_loadseg
6520 {
6521 /* Core address to which the segment is mapped. */
6522 Elf32_Addr addr;
6523 /* VMA recorded in the program header. */
6524 Elf32_Addr p_vaddr;
6525 /* Size of this segment in memory. */
6526 Elf32_Word p_memsz;
6527 };
6528
6529 # if defined PT_GETDSBT
6530 struct target_loadmap
6531 {
6532 /* Protocol version number, must be zero. */
6533 Elf32_Word version;
6534 /* Pointer to the DSBT table, its size, and the DSBT index. */
6535 unsigned *dsbt_table;
6536 unsigned dsbt_size, dsbt_index;
6537 /* Number of segments in this map. */
6538 Elf32_Word nsegs;
6539 /* The actual memory map. */
6540 struct target_loadseg segs[/*nsegs*/];
6541 };
6542 # define LINUX_LOADMAP PT_GETDSBT
6543 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6544 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6545 # else
6546 struct target_loadmap
6547 {
6548 /* Protocol version number, must be zero. */
6549 Elf32_Half version;
6550 /* Number of segments in this map. */
6551 Elf32_Half nsegs;
6552 /* The actual memory map. */
6553 struct target_loadseg segs[/*nsegs*/];
6554 };
6555 # define LINUX_LOADMAP PTRACE_GETFDPIC
6556 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6557 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6558 # endif
6559
6560 static int
6561 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6562 unsigned char *myaddr, unsigned int len)
6563 {
6564 int pid = lwpid_of (current_thread);
6565 int addr = -1;
6566 struct target_loadmap *data = NULL;
6567 unsigned int actual_length, copy_length;
6568
6569 if (strcmp (annex, "exec") == 0)
6570 addr = (int) LINUX_LOADMAP_EXEC;
6571 else if (strcmp (annex, "interp") == 0)
6572 addr = (int) LINUX_LOADMAP_INTERP;
6573 else
6574 return -1;
6575
6576 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6577 return -1;
6578
6579 if (data == NULL)
6580 return -1;
6581
6582 actual_length = sizeof (struct target_loadmap)
6583 + sizeof (struct target_loadseg) * data->nsegs;
6584
6585 if (offset < 0 || offset > actual_length)
6586 return -1;
6587
6588 copy_length = actual_length - offset < len ? actual_length - offset : len;
6589 memcpy (myaddr, (char *) data + offset, copy_length);
6590 return copy_length;
6591 }
6592 #else
6593 # define linux_read_loadmap NULL
6594 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6595
6596 static void
6597 linux_process_qsupported (char **features, int count)
6598 {
6599 if (the_low_target.process_qsupported != NULL)
6600 the_low_target.process_qsupported (features, count);
6601 }
6602
6603 static int
6604 linux_supports_catch_syscall (void)
6605 {
6606 return (the_low_target.get_syscall_trapinfo != NULL
6607 && linux_supports_tracesysgood ());
6608 }
6609
6610 static int
6611 linux_get_ipa_tdesc_idx (void)
6612 {
6613 if (the_low_target.get_ipa_tdesc_idx == NULL)
6614 return 0;
6615
6616 return (*the_low_target.get_ipa_tdesc_idx) ();
6617 }
6618
6619 static int
6620 linux_supports_tracepoints (void)
6621 {
6622 if (the_low_target.supports_tracepoints == NULL)
6623 return 0;
6624
6625 return (*the_low_target.supports_tracepoints) ();
6626 }
6627
6628 static CORE_ADDR
6629 linux_read_pc (struct regcache *regcache)
6630 {
6631 if (the_low_target.get_pc == NULL)
6632 return 0;
6633
6634 return (*the_low_target.get_pc) (regcache);
6635 }
6636
6637 static void
6638 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6639 {
6640 gdb_assert (the_low_target.set_pc != NULL);
6641
6642 (*the_low_target.set_pc) (regcache, pc);
6643 }
6644
6645 static int
6646 linux_thread_stopped (struct thread_info *thread)
6647 {
6648 return get_thread_lwp (thread)->stopped;
6649 }
6650
6651 /* This exposes stop-all-threads functionality to other modules. */
6652
6653 static void
6654 linux_pause_all (int freeze)
6655 {
6656 stop_all_lwps (freeze, NULL);
6657 }
6658
6659 /* This exposes unstop-all-threads functionality to other gdbserver
6660 modules. */
6661
6662 static void
6663 linux_unpause_all (int unfreeze)
6664 {
6665 unstop_all_lwps (unfreeze, NULL);
6666 }
6667
6668 static int
6669 linux_prepare_to_access_memory (void)
6670 {
6671 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6672 running LWP. */
6673 if (non_stop)
6674 linux_pause_all (1);
6675 return 0;
6676 }
6677
6678 static void
6679 linux_done_accessing_memory (void)
6680 {
6681 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6682 running LWP. */
6683 if (non_stop)
6684 linux_unpause_all (1);
6685 }
6686
6687 static int
6688 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6689 CORE_ADDR collector,
6690 CORE_ADDR lockaddr,
6691 ULONGEST orig_size,
6692 CORE_ADDR *jump_entry,
6693 CORE_ADDR *trampoline,
6694 ULONGEST *trampoline_size,
6695 unsigned char *jjump_pad_insn,
6696 ULONGEST *jjump_pad_insn_size,
6697 CORE_ADDR *adjusted_insn_addr,
6698 CORE_ADDR *adjusted_insn_addr_end,
6699 char *err)
6700 {
6701 return (*the_low_target.install_fast_tracepoint_jump_pad)
6702 (tpoint, tpaddr, collector, lockaddr, orig_size,
6703 jump_entry, trampoline, trampoline_size,
6704 jjump_pad_insn, jjump_pad_insn_size,
6705 adjusted_insn_addr, adjusted_insn_addr_end,
6706 err);
6707 }
6708
6709 static struct emit_ops *
6710 linux_emit_ops (void)
6711 {
6712 if (the_low_target.emit_ops != NULL)
6713 return (*the_low_target.emit_ops) ();
6714 else
6715 return NULL;
6716 }
6717
6718 static int
6719 linux_get_min_fast_tracepoint_insn_len (void)
6720 {
6721 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6722 }
6723
6724 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6725
6726 static int
6727 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6728 CORE_ADDR *phdr_memaddr, int *num_phdr)
6729 {
6730 char filename[PATH_MAX];
6731 int fd;
6732 const int auxv_size = is_elf64
6733 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6734 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6735
6736 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6737
6738 fd = open (filename, O_RDONLY);
6739 if (fd < 0)
6740 return 1;
6741
6742 *phdr_memaddr = 0;
6743 *num_phdr = 0;
6744 while (read (fd, buf, auxv_size) == auxv_size
6745 && (*phdr_memaddr == 0 || *num_phdr == 0))
6746 {
6747 if (is_elf64)
6748 {
6749 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6750
6751 switch (aux->a_type)
6752 {
6753 case AT_PHDR:
6754 *phdr_memaddr = aux->a_un.a_val;
6755 break;
6756 case AT_PHNUM:
6757 *num_phdr = aux->a_un.a_val;
6758 break;
6759 }
6760 }
6761 else
6762 {
6763 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6764
6765 switch (aux->a_type)
6766 {
6767 case AT_PHDR:
6768 *phdr_memaddr = aux->a_un.a_val;
6769 break;
6770 case AT_PHNUM:
6771 *num_phdr = aux->a_un.a_val;
6772 break;
6773 }
6774 }
6775 }
6776
6777 close (fd);
6778
6779 if (*phdr_memaddr == 0 || *num_phdr == 0)
6780 {
6781 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6782 "phdr_memaddr = %ld, phdr_num = %d",
6783 (long) *phdr_memaddr, *num_phdr);
6784 return 2;
6785 }
6786
6787 return 0;
6788 }
6789
6790 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6791
6792 static CORE_ADDR
6793 get_dynamic (const int pid, const int is_elf64)
6794 {
6795 CORE_ADDR phdr_memaddr, relocation;
6796 int num_phdr, i;
6797 unsigned char *phdr_buf;
6798 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6799
6800 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6801 return 0;
6802
6803 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6804 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6805
6806 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6807 return 0;
6808
6809 /* Compute relocation: it is expected to be 0 for "regular" executables,
6810 non-zero for PIE ones. */
6811 relocation = -1;
6812 for (i = 0; relocation == -1 && i < num_phdr; i++)
6813 if (is_elf64)
6814 {
6815 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6816
6817 if (p->p_type == PT_PHDR)
6818 relocation = phdr_memaddr - p->p_vaddr;
6819 }
6820 else
6821 {
6822 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6823
6824 if (p->p_type == PT_PHDR)
6825 relocation = phdr_memaddr - p->p_vaddr;
6826 }
6827
6828 if (relocation == -1)
6829 {
6830 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6831 all real-world executables, including PIE executables, always have
6832 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6833 in fpc (Free Pascal 2.4) binaries, but those neither need nor provide
6834 DT_DEBUG anyway (fpc binaries are statically linked).
6835
6836 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
6837
6838 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6839
6840 return 0;
6841 }
6842
6843 for (i = 0; i < num_phdr; i++)
6844 {
6845 if (is_elf64)
6846 {
6847 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6848
6849 if (p->p_type == PT_DYNAMIC)
6850 return p->p_vaddr + relocation;
6851 }
6852 else
6853 {
6854 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6855
6856 if (p->p_type == PT_DYNAMIC)
6857 return p->p_vaddr + relocation;
6858 }
6859 }
6860
6861 return 0;
6862 }
6863
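/* Illustrative sketch, not part of this file: the PIE relocation rule
   used above, isolated for a 64-bit ELF.  Given where the program
   headers actually landed at runtime (AT_PHDR from the auxiliary vector)
   and the p_vaddr recorded in the PT_PHDR entry, the load bias is their
   difference; adding it to any p_vaddr yields a runtime address.  The
   name compute_load_bias is hypothetical.  */
#if 0
#include <elf.h>
#include <stdint.h>

static int
compute_load_bias (uint64_t at_phdr, const Elf64_Phdr *phdrs, int nphdr,
		   uint64_t *bias)
{
  int i;

  for (i = 0; i < nphdr; i++)
    if (phdrs[i].p_type == PT_PHDR)
      {
	/* Zero for fixed-address executables, non-zero for PIE.  */
	*bias = at_phdr - phdrs[i].p_vaddr;
	return 0;
      }
  return -1;			/* No PT_PHDR; the bias cannot be computed.  */
}
#endif
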
6864 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6865 can be 0 if the inferior does not yet have the library list initialized.
6866 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6867 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6868
6869 static CORE_ADDR
6870 get_r_debug (const int pid, const int is_elf64)
6871 {
6872 CORE_ADDR dynamic_memaddr;
6873 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6874 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6875 CORE_ADDR map = -1;
6876
6877 dynamic_memaddr = get_dynamic (pid, is_elf64);
6878 if (dynamic_memaddr == 0)
6879 return map;
6880
6881 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6882 {
6883 if (is_elf64)
6884 {
6885 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6886 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6887 union
6888 {
6889 Elf64_Xword map;
6890 unsigned char buf[sizeof (Elf64_Xword)];
6891 }
6892 rld_map;
6893 #endif
6894 #ifdef DT_MIPS_RLD_MAP
6895 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6896 {
6897 if (linux_read_memory (dyn->d_un.d_val,
6898 rld_map.buf, sizeof (rld_map.buf)) == 0)
6899 return rld_map.map;
6900 else
6901 break;
6902 }
6903 #endif /* DT_MIPS_RLD_MAP */
6904 #ifdef DT_MIPS_RLD_MAP_REL
6905 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6906 {
6907 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6908 rld_map.buf, sizeof (rld_map.buf)) == 0)
6909 return rld_map.map;
6910 else
6911 break;
6912 }
6913 #endif /* DT_MIPS_RLD_MAP_REL */
6914
6915 if (dyn->d_tag == DT_DEBUG && map == -1)
6916 map = dyn->d_un.d_val;
6917
6918 if (dyn->d_tag == DT_NULL)
6919 break;
6920 }
6921 else
6922 {
6923 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6924 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6925 union
6926 {
6927 Elf32_Word map;
6928 unsigned char buf[sizeof (Elf32_Word)];
6929 }
6930 rld_map;
6931 #endif
6932 #ifdef DT_MIPS_RLD_MAP
6933 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6934 {
6935 if (linux_read_memory (dyn->d_un.d_val,
6936 rld_map.buf, sizeof (rld_map.buf)) == 0)
6937 return rld_map.map;
6938 else
6939 break;
6940 }
6941 #endif /* DT_MIPS_RLD_MAP */
6942 #ifdef DT_MIPS_RLD_MAP_REL
6943 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6944 {
6945 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6946 rld_map.buf, sizeof (rld_map.buf)) == 0)
6947 return rld_map.map;
6948 else
6949 break;
6950 }
6951 #endif /* DT_MIPS_RLD_MAP_REL */
6952
6953 if (dyn->d_tag == DT_DEBUG && map == -1)
6954 map = dyn->d_un.d_val;
6955
6956 if (dyn->d_tag == DT_NULL)
6957 break;
6958 }
6959
6960 dynamic_memaddr += dyn_size;
6961 }
6962
6963 return map;
6964 }
6965
6966 /* Read one pointer from MEMADDR in the inferior. */
6967
6968 static int
6969 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6970 {
6971 int ret;
6972
6973 /* Go through a union so this works on either big or little endian
6974 hosts, when the inferior's pointer size is smaller than the size
6975 of CORE_ADDR. It is assumed the inferior's endianness is the
6976 same as the superior's. */
6977 union
6978 {
6979 CORE_ADDR core_addr;
6980 unsigned int ui;
6981 unsigned char uc;
6982 } addr;
6983
6984 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6985 if (ret == 0)
6986 {
6987 if (ptr_size == sizeof (CORE_ADDR))
6988 *ptr = addr.core_addr;
6989 else if (ptr_size == sizeof (unsigned int))
6990 *ptr = addr.ui;
6991 else
6992 gdb_assert_not_reached ("unhandled pointer size");
6993 }
6994 return ret;
6995 }
6996
6997 struct link_map_offsets
6998 {
6999 /* Offset and size of r_debug.r_version. */
7000 int r_version_offset;
7001
7002 /* Offset and size of r_debug.r_map. */
7003 int r_map_offset;
7004
7005 /* Offset to l_addr field in struct link_map. */
7006 int l_addr_offset;
7007
7008 /* Offset to l_name field in struct link_map. */
7009 int l_name_offset;
7010
7011 /* Offset to l_ld field in struct link_map. */
7012 int l_ld_offset;
7013
7014 /* Offset to l_next field in struct link_map. */
7015 int l_next_offset;
7016
7017 /* Offset to l_prev field in struct link_map. */
7018 int l_prev_offset;
7019 };
7020
7021 /* Construct qXfer:libraries-svr4:read reply. */
7022
7023 static int
7024 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7025 unsigned const char *writebuf,
7026 CORE_ADDR offset, int len)
7027 {
7028 char *document;
7029 unsigned document_len;
7030 struct process_info_private *const priv = current_process ()->priv;
7031 char filename[PATH_MAX];
7032 int pid, is_elf64;
7033
7034 static const struct link_map_offsets lmo_32bit_offsets =
7035 {
7036 0, /* r_version offset. */
7037 4, /* r_debug.r_map offset. */
7038 0, /* l_addr offset in link_map. */
7039 4, /* l_name offset in link_map. */
7040 8, /* l_ld offset in link_map. */
7041 12, /* l_next offset in link_map. */
7042 16 /* l_prev offset in link_map. */
7043 };
7044
7045 static const struct link_map_offsets lmo_64bit_offsets =
7046 {
7047 0, /* r_version offset. */
7048 8, /* r_debug.r_map offset. */
7049 0, /* l_addr offset in link_map. */
7050 8, /* l_name offset in link_map. */
7051 16, /* l_ld offset in link_map. */
7052 24, /* l_next offset in link_map. */
7053 32 /* l_prev offset in link_map. */
7054 };
7055 const struct link_map_offsets *lmo;
7056 unsigned int machine;
7057 int ptr_size;
7058 CORE_ADDR lm_addr = 0, lm_prev = 0;
7059 int allocated = 1024;
7060 char *p;
7061 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7062 int header_done = 0;
7063
7064 if (writebuf != NULL)
7065 return -2;
7066 if (readbuf == NULL)
7067 return -1;
7068
7069 pid = lwpid_of (current_thread);
7070 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7071 is_elf64 = elf_64_file_p (filename, &machine);
7072 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7073 ptr_size = is_elf64 ? 8 : 4;
7074
7075 while (annex[0] != '\0')
7076 {
7077 const char *sep;
7078 CORE_ADDR *addrp;
7079 int len;
7080
7081 sep = strchr (annex, '=');
7082 if (sep == NULL)
7083 break;
7084
7085 len = sep - annex;
7086 if (len == 5 && startswith (annex, "start"))
7087 addrp = &lm_addr;
7088 else if (len == 4 && startswith (annex, "prev"))
7089 addrp = &lm_prev;
7090 else
7091 {
7092 annex = strchr (sep, ';');
7093 if (annex == NULL)
7094 break;
7095 annex++;
7096 continue;
7097 }
7098
7099 annex = decode_address_to_semicolon (addrp, sep + 1);
7100 }
7101
7102 if (lm_addr == 0)
7103 {
7104 int r_version = 0;
7105
7106 if (priv->r_debug == 0)
7107 priv->r_debug = get_r_debug (pid, is_elf64);
7108
7109 /* We failed to find DT_DEBUG. This situation will not change
7110 for this inferior, so do not retry. Report it to GDB as E01;
7111 see GDB's solib-svr4.c for the reasons. */
7112 if (priv->r_debug == (CORE_ADDR) -1)
7113 return -1;
7114
7115 if (priv->r_debug != 0)
7116 {
7117 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7118 (unsigned char *) &r_version,
7119 sizeof (r_version)) != 0
7120 || r_version != 1)
7121 {
7122 warning ("unexpected r_debug version %d", r_version);
7123 }
7124 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7125 &lm_addr, ptr_size) != 0)
7126 {
7127 warning ("unable to read r_map from 0x%lx",
7128 (long) priv->r_debug + lmo->r_map_offset);
7129 }
7130 }
7131 }
7132
7133 document = (char *) xmalloc (allocated);
7134 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7135 p = document + strlen (document);
7136
7137 while (lm_addr
7138 && read_one_ptr (lm_addr + lmo->l_name_offset,
7139 &l_name, ptr_size) == 0
7140 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7141 &l_addr, ptr_size) == 0
7142 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7143 &l_ld, ptr_size) == 0
7144 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7145 &l_prev, ptr_size) == 0
7146 && read_one_ptr (lm_addr + lmo->l_next_offset,
7147 &l_next, ptr_size) == 0)
7148 {
7149 unsigned char libname[PATH_MAX];
7150
7151 if (lm_prev != l_prev)
7152 {
7153 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7154 (long) lm_prev, (long) l_prev);
7155 break;
7156 }
7157
7158 /* Ignore the first entry even if it has a valid name, as the first
7159 entry corresponds to the main executable. The first entry should not
7160 be skipped if the dynamic loader was loaded late by a static executable
7161 (see solib-svr4.c parameter ignore_first). But in that case the main
7162 executable does not have PT_DYNAMIC present, and this function has
7163 already exited above because get_r_debug failed. */
7164 if (lm_prev == 0)
7165 {
7166 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7167 p = p + strlen (p);
7168 }
7169 else
7170 {
7171 /* Not checking for error because reading may stop before
7172 we've got PATH_MAX worth of characters. */
7173 libname[0] = '\0';
7174 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7175 libname[sizeof (libname) - 1] = '\0';
7176 if (libname[0] != '\0')
7177 {
7178 /* 6x the size for xml_escape_text below. */
7179 size_t len = 6 * strlen ((char *) libname);
7180
7181 if (!header_done)
7182 {
7183 /* Terminate `<library-list-svr4'. */
7184 *p++ = '>';
7185 header_done = 1;
7186 }
7187
7188 while (allocated < p - document + len + 200)
7189 {
7190 /* Expand to guarantee sufficient storage. */
7191 uintptr_t document_len = p - document;
7192
7193 document = (char *) xrealloc (document, 2 * allocated);
7194 allocated *= 2;
7195 p = document + document_len;
7196 }
7197
7198 std::string name = xml_escape_text ((char *) libname);
7199 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7200 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7201 name.c_str (), (unsigned long) lm_addr,
7202 (unsigned long) l_addr, (unsigned long) l_ld);
7203 }
7204 }
7205
7206 lm_prev = lm_addr;
7207 lm_addr = l_next;
7208 }
7209
7210 if (!header_done)
7211 {
7212 /* Empty list; terminate `<library-list-svr4'. */
7213 strcpy (p, "/>");
7214 }
7215 else
7216 strcpy (p, "</library-list-svr4>");
7217
7218 document_len = strlen (document);
7219 if (offset < document_len)
7220 document_len -= offset;
7221 else
7222 document_len = 0;
7223 if (len > document_len)
7224 len = document_len;
7225
7226 memcpy (readbuf, document + offset, len);
7227 xfree (document);
7228
7229 return len;
7230 }
7231
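/* Illustrative sketch, not part of this file: the r_map traversal above
   reduced to its skeleton, using the 64-bit link_map offsets from
   lmo_64bit_offsets.  read_ptr stands for any "read one inferior
   pointer" primitive (read_one_ptr plays that role above); the name
   walk_link_map is hypothetical and error handling is elided.  */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef int (*read_ptr_fn) (uint64_t addr, uint64_t *value);

static void
walk_link_map (uint64_t r_map, read_ptr_fn read_ptr)
{
  uint64_t lm = r_map;

  while (lm != 0)
    {
      uint64_t l_addr, l_name, l_next;

      read_ptr (lm + 0, &l_addr);	/* l_addr offset.  */
      read_ptr (lm + 8, &l_name);	/* l_name offset.  */
      read_ptr (lm + 24, &l_next);	/* l_next offset.  */
      printf ("lm=0x%llx l_addr=0x%llx l_name=0x%llx\n",
	      (unsigned long long) lm, (unsigned long long) l_addr,
	      (unsigned long long) l_name);
      lm = l_next;
    }
}
#endif
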
7232 #ifdef HAVE_LINUX_BTRACE
7233
7234 /* See to_disable_btrace target method. */
7235
7236 static int
7237 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7238 {
7239 enum btrace_error err;
7240
7241 err = linux_disable_btrace (tinfo);
7242 return (err == BTRACE_ERR_NONE ? 0 : -1);
7243 }
7244
7245 /* Encode an Intel Processor Trace configuration. */
7246
7247 static void
7248 linux_low_encode_pt_config (struct buffer *buffer,
7249 const struct btrace_data_pt_config *config)
7250 {
7251 buffer_grow_str (buffer, "<pt-config>\n");
7252
7253 switch (config->cpu.vendor)
7254 {
7255 case CV_INTEL:
7256 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7257 "model=\"%u\" stepping=\"%u\"/>\n",
7258 config->cpu.family, config->cpu.model,
7259 config->cpu.stepping);
7260 break;
7261
7262 default:
7263 break;
7264 }
7265
7266 buffer_grow_str (buffer, "</pt-config>\n");
7267 }
7268
7269 /* Encode a raw buffer. */
7270
7271 static void
7272 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7273 unsigned int size)
7274 {
7275 if (size == 0)
7276 return;
7277
7278 /* We use hex encoding - see common/rsp-low.h. */
7279 buffer_grow_str (buffer, "<raw>\n");
7280
7281 while (size-- > 0)
7282 {
7283 char elem[2];
7284
7285 elem[0] = tohex ((*data >> 4) & 0xf);
7286 elem[1] = tohex (*data++ & 0xf);
7287
7288 buffer_grow (buffer, elem, 2);
7289 }
7290
7291 buffer_grow_str (buffer, "</raw>\n");
7292 }
7293
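/* Illustrative sketch, not part of this file: the inverse of the hex
   encoding above, turning two ASCII hex digits back into one byte.  The
   names fromhex_nibble and decode_raw are hypothetical; the real RSP
   helpers live in common/rsp-low.h.  */
#if 0
static int
fromhex_nibble (char c)
{
  if (c >= '0' && c <= '9')
    return c - '0';
  if (c >= 'a' && c <= 'f')
    return c - 'a' + 10;
  if (c >= 'A' && c <= 'F')
    return c - 'A' + 10;
  return -1;
}

static int
decode_raw (const char *hex, unsigned char *out, unsigned int size)
{
  unsigned int i;

  for (i = 0; i < size; i++)
    {
      int hi = fromhex_nibble (hex[2 * i]);
      int lo = fromhex_nibble (hex[2 * i + 1]);

      if (hi < 0 || lo < 0)
	return -1;
      out[i] = (unsigned char) ((hi << 4) | lo);
    }
  return 0;
}
#endif
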
7294 /* See to_read_btrace target method. */
7295
7296 static int
7297 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7298 enum btrace_read_type type)
7299 {
7300 struct btrace_data btrace;
7301 struct btrace_block *block;
7302 enum btrace_error err;
7303 int i;
7304
7305 btrace_data_init (&btrace);
7306
7307 err = linux_read_btrace (&btrace, tinfo, type);
7308 if (err != BTRACE_ERR_NONE)
7309 {
7310 if (err == BTRACE_ERR_OVERFLOW)
7311 buffer_grow_str0 (buffer, "E.Overflow.");
7312 else
7313 buffer_grow_str0 (buffer, "E.Generic Error.");
7314
7315 goto err;
7316 }
7317
7318 switch (btrace.format)
7319 {
7320 case BTRACE_FORMAT_NONE:
7321 buffer_grow_str0 (buffer, "E.No Trace.");
7322 goto err;
7323
7324 case BTRACE_FORMAT_BTS:
7325 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7326 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7327
7328 for (i = 0;
7329 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7330 i++)
7331 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7332 paddress (block->begin), paddress (block->end));
7333
7334 buffer_grow_str0 (buffer, "</btrace>\n");
7335 break;
7336
7337 case BTRACE_FORMAT_PT:
7338 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7339 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7340 buffer_grow_str (buffer, "<pt>\n");
7341
7342 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7343
7344 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7345 btrace.variant.pt.size);
7346
7347 buffer_grow_str (buffer, "</pt>\n");
7348 buffer_grow_str0 (buffer, "</btrace>\n");
7349 break;
7350
7351 default:
7352 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7353 goto err;
7354 }
7355
7356 btrace_data_fini (&btrace);
7357 return 0;
7358
7359 err:
7360 btrace_data_fini (&btrace);
7361 return -1;
7362 }
7363
7364 /* See to_btrace_conf target method. */
7365
7366 static int
7367 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7368 struct buffer *buffer)
7369 {
7370 const struct btrace_config *conf;
7371
7372 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7373 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7374
7375 conf = linux_btrace_conf (tinfo);
7376 if (conf != NULL)
7377 {
7378 switch (conf->format)
7379 {
7380 case BTRACE_FORMAT_NONE:
7381 break;
7382
7383 case BTRACE_FORMAT_BTS:
7384 buffer_xml_printf (buffer, "<bts");
7385 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7386 buffer_xml_printf (buffer, " />\n");
7387 break;
7388
7389 case BTRACE_FORMAT_PT:
7390 buffer_xml_printf (buffer, "<pt");
7391 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7392 buffer_xml_printf (buffer, "/>\n");
7393 break;
7394 }
7395 }
7396
7397 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7398 return 0;
7399 }
7400 #endif /* HAVE_LINUX_BTRACE */
7401
7402 /* See nat/linux-nat.h. */
7403
7404 ptid_t
7405 current_lwp_ptid (void)
7406 {
7407 return ptid_of (current_thread);
7408 }
7409
7410 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7411
7412 static int
7413 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7414 {
7415 if (the_low_target.breakpoint_kind_from_pc != NULL)
7416 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7417 else
7418 return default_breakpoint_kind_from_pc (pcptr);
7419 }
7420
7421 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7422
7423 static const gdb_byte *
7424 linux_sw_breakpoint_from_kind (int kind, int *size)
7425 {
7426 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7427
7428 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7429 }
7430
7431 /* Implementation of the target_ops method
7432 "breakpoint_kind_from_current_state". */
7433
7434 static int
7435 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7436 {
7437 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7438 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7439 else
7440 return linux_breakpoint_kind_from_pc (pcptr);
7441 }
7442
7443 /* Default implementation of linux_target_ops method "set_pc" for
7444 32-bit pc register which is literally named "pc". */
7445
7446 void
7447 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7448 {
7449 uint32_t newpc = pc;
7450
7451 supply_register_by_name (regcache, "pc", &newpc);
7452 }
7453
7454 /* Default implementation of linux_target_ops method "get_pc" for
7455 32-bit pc register which is literally named "pc". */
7456
7457 CORE_ADDR
7458 linux_get_pc_32bit (struct regcache *regcache)
7459 {
7460 uint32_t pc;
7461
7462 collect_register_by_name (regcache, "pc", &pc);
7463 if (debug_threads)
7464 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7465 return pc;
7466 }
7467
7468 /* Default implementation of linux_target_ops method "set_pc" for
7469 64-bit pc register which is literally named "pc". */
7470
7471 void
7472 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7473 {
7474 uint64_t newpc = pc;
7475
7476 supply_register_by_name (regcache, "pc", &newpc);
7477 }
7478
7479 /* Default implementation of linux_target_ops method "get_pc" for
7480 64-bit pc register which is literally named "pc". */
7481
7482 CORE_ADDR
7483 linux_get_pc_64bit (struct regcache *regcache)
7484 {
7485 uint64_t pc;
7486
7487 collect_register_by_name (regcache, "pc", &pc);
7488 if (debug_threads)
7489 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7490 return pc;
7491 }
7492
7493
7494 static struct target_ops linux_target_ops = {
7495 linux_create_inferior,
7496 linux_post_create_inferior,
7497 linux_attach,
7498 linux_kill,
7499 linux_detach,
7500 linux_mourn,
7501 linux_join,
7502 linux_thread_alive,
7503 linux_resume,
7504 linux_wait,
7505 linux_fetch_registers,
7506 linux_store_registers,
7507 linux_prepare_to_access_memory,
7508 linux_done_accessing_memory,
7509 linux_read_memory,
7510 linux_write_memory,
7511 linux_look_up_symbols,
7512 linux_request_interrupt,
7513 linux_read_auxv,
7514 linux_supports_z_point_type,
7515 linux_insert_point,
7516 linux_remove_point,
7517 linux_stopped_by_sw_breakpoint,
7518 linux_supports_stopped_by_sw_breakpoint,
7519 linux_stopped_by_hw_breakpoint,
7520 linux_supports_stopped_by_hw_breakpoint,
7521 linux_supports_hardware_single_step,
7522 linux_stopped_by_watchpoint,
7523 linux_stopped_data_address,
7524 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7525 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7526 && defined(PT_TEXT_END_ADDR)
7527 linux_read_offsets,
7528 #else
7529 NULL,
7530 #endif
7531 #ifdef USE_THREAD_DB
7532 thread_db_get_tls_address,
7533 #else
7534 NULL,
7535 #endif
7536 linux_qxfer_spu,
7537 hostio_last_error_from_errno,
7538 linux_qxfer_osdata,
7539 linux_xfer_siginfo,
7540 linux_supports_non_stop,
7541 linux_async,
7542 linux_start_non_stop,
7543 linux_supports_multi_process,
7544 linux_supports_fork_events,
7545 linux_supports_vfork_events,
7546 linux_supports_exec_events,
7547 linux_handle_new_gdb_connection,
7548 #ifdef USE_THREAD_DB
7549 thread_db_handle_monitor_command,
7550 #else
7551 NULL,
7552 #endif
7553 linux_common_core_of_thread,
7554 linux_read_loadmap,
7555 linux_process_qsupported,
7556 linux_supports_tracepoints,
7557 linux_read_pc,
7558 linux_write_pc,
7559 linux_thread_stopped,
7560 NULL,
7561 linux_pause_all,
7562 linux_unpause_all,
7563 linux_stabilize_threads,
7564 linux_install_fast_tracepoint_jump_pad,
7565 linux_emit_ops,
7566 linux_supports_disable_randomization,
7567 linux_get_min_fast_tracepoint_insn_len,
7568 linux_qxfer_libraries_svr4,
7569 linux_supports_agent,
7570 #ifdef HAVE_LINUX_BTRACE
7571 linux_supports_btrace,
7572 linux_enable_btrace,
7573 linux_low_disable_btrace,
7574 linux_low_read_btrace,
7575 linux_low_btrace_conf,
7576 #else
7577 NULL,
7578 NULL,
7579 NULL,
7580 NULL,
7581 NULL,
7582 #endif
7583 linux_supports_range_stepping,
7584 linux_proc_pid_to_exec_file,
7585 linux_mntns_open_cloexec,
7586 linux_mntns_unlink,
7587 linux_mntns_readlink,
7588 linux_breakpoint_kind_from_pc,
7589 linux_sw_breakpoint_from_kind,
7590 linux_proc_tid_get_name,
7591 linux_breakpoint_kind_from_current_state,
7592 linux_supports_software_single_step,
7593 linux_supports_catch_syscall,
7594 linux_get_ipa_tdesc_idx,
7595 #if USE_THREAD_DB
7596 thread_db_thread_handle,
7597 #else
7598 NULL,
7599 #endif
7600 };
7601
7602 #ifdef HAVE_LINUX_REGSETS
7603 void
7604 initialize_regsets_info (struct regsets_info *info)
7605 {
7606 for (info->num_regsets = 0;
7607 info->regsets[info->num_regsets].size >= 0;
7608 info->num_regsets++)
7609 ;
7610 }
7611 #endif
7612
7613 void
7614 initialize_low (void)
7615 {
7616 struct sigaction sigchld_action;
7617
7618 memset (&sigchld_action, 0, sizeof (sigchld_action));
7619 set_target_ops (&linux_target_ops);
7620
7621 linux_ptrace_init_warnings ();
7622
7623 sigchld_action.sa_handler = sigchld_handler;
7624 sigemptyset (&sigchld_action.sa_mask);
7625 sigchld_action.sa_flags = SA_RESTART;
7626 sigaction (SIGCHLD, &sigchld_action, NULL);
7627
7628 initialize_low_arch ();
7629
7630 linux_check_ptrace_features ();
7631 }