gdbserver/linux-low: turn 'fetch_register' into a method
gdbserver/linux-low.cc
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
48 #include <inttypes.h>
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 #ifndef AT_HWCAP2
75 #define AT_HWCAP2 26
76 #endif
77
78 /* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81 #if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84 #if defined(__mcoldfire__)
85 /* These are still undefined in 3.10 kernels. */
86 #define PT_TEXT_ADDR 49*4
87 #define PT_DATA_ADDR 50*4
88 #define PT_TEXT_END_ADDR 51*4
89 /* BFIN already defines these since at least 2.6.32 kernels. */
90 #elif defined(BFIN)
91 #define PT_TEXT_ADDR 220
92 #define PT_TEXT_END_ADDR 224
93 #define PT_DATA_ADDR 228
94 /* These are still undefined in 3.10 kernels. */
95 #elif defined(__TMS320C6X__)
96 #define PT_TEXT_ADDR (0x10000*4)
97 #define PT_DATA_ADDR (0x10004*4)
98 #define PT_TEXT_END_ADDR (0x10008*4)
99 #endif
100 #endif
101
102 #if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107 #define SUPPORTS_READ_OFFSETS
108 #endif
109
110 #ifdef HAVE_LINUX_BTRACE
111 # include "nat/linux-btrace.h"
112 # include "gdbsupport/btrace-common.h"
113 #endif
114
115 #ifndef HAVE_ELF32_AUXV_T
116 /* Copied from glibc's elf.h. */
117 typedef struct
118 {
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We used to have pointer elements added here.  We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127 } Elf32_auxv_t;
128 #endif
129
130 #ifndef HAVE_ELF64_AUXV_T
131 /* Copied from glibc's elf.h. */
132 typedef struct
133 {
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We used to have pointer elements added here.  We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142 } Elf64_auxv_t;
143 #endif
144
145 /* Does the current host support PTRACE_GETREGSET? */
146 int have_ptrace_getregset = -1;
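/* (Tri-state: -1 means "not yet determined"; the architecture code
   that first probes PTRACE_GETREGSET sets this to 0 or 1.)  */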
147
148 /* LWP accessors. */
149
150 /* See nat/linux-nat.h. */
151
152 ptid_t
153 ptid_of_lwp (struct lwp_info *lwp)
154 {
155 return ptid_of (get_lwp_thread (lwp));
156 }
157
158 /* See nat/linux-nat.h. */
159
160 void
161 lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163 {
164 lwp->arch_private = info;
165 }
166
167 /* See nat/linux-nat.h. */
168
169 struct arch_lwp_info *
170 lwp_arch_private_info (struct lwp_info *lwp)
171 {
172 return lwp->arch_private;
173 }
174
175 /* See nat/linux-nat.h. */
176
177 int
178 lwp_is_stopped (struct lwp_info *lwp)
179 {
180 return lwp->stopped;
181 }
182
183 /* See nat/linux-nat.h. */
184
185 enum target_stop_reason
186 lwp_stop_reason (struct lwp_info *lwp)
187 {
188 return lwp->stop_reason;
189 }
190
191 /* See nat/linux-nat.h. */
192
193 int
194 lwp_is_stepping (struct lwp_info *lwp)
195 {
196 return lwp->stepping;
197 }
198
199 /* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
202
203 struct simple_pid_list
204 {
205 /* The process ID. */
206 int pid;
207
208 /* The status as reported by waitpid. */
209 int status;
210
211 /* Next in chain. */
212 struct simple_pid_list *next;
213 };
214 struct simple_pid_list *stopped_pids;
215
216 /* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219 static void
220 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221 {
222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228 }
229
230 static int
231 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232 {
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246 }
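/* A minimal usage sketch of the two helpers above (illustrative only,
   not compiled; PID 1234 and the SIGSTOP status are hypothetical):

     int status;
     add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
     ...
     if (pull_pid_from_list (&stopped_pids, 1234, &status))
       {
         // 1234's initial stop was already seen; STATUS now holds it.
       }

   handle_extended_wait below uses this pattern to pair a
   PTRACE_EVENT_FORK/VFORK/CLONE report with the new child's initial
   SIGSTOP, whichever side is seen first.  */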
247
248 enum stopping_threads_kind
249 {
250 /* Not stopping threads presently. */
251 NOT_STOPPING_THREADS,
252
253 /* Stopping threads. */
254 STOPPING_THREADS,
255
256 /* Stopping and suspending threads. */
257 STOPPING_AND_SUSPENDING_THREADS
258 };
259
260 /* This is set while stop_all_lwps is in effect. */
261 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
262
263 /* FIXME make into a target method? */
264 int using_threads = 1;
265
266 /* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268 static int stabilizing_threads;
269
270 static void linux_resume_one_lwp (struct lwp_info *lwp,
271 int step, int signal, siginfo_t *info);
272 static void unsuspend_all_lwps (struct lwp_info *except);
273 static struct lwp_info *add_lwp (ptid_t ptid);
274 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
275 static int lwp_is_marked_dead (struct lwp_info *lwp);
276 static int finish_step_over (struct lwp_info *lwp);
277 static int kill_lwp (unsigned long lwpid, int signo);
278 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
279 static int linux_low_ptrace_options (int attached);
280 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
281 static void proceed_one_lwp (thread_info *thread, lwp_info *except);
282
283 /* When the event-loop is doing a step-over, this points at the thread
284 being stepped. */
285 ptid_t step_over_bkpt;
286
287 /* True if the low target can hardware single-step. */
288
289 static int
290 can_hardware_single_step (void)
291 {
292 if (the_low_target.supports_hardware_single_step != NULL)
293 return the_low_target.supports_hardware_single_step ();
294 else
295 return 0;
296 }
297
298 /* True if the low target can software single-step. Such targets
299 implement the GET_NEXT_PCS callback. */
300
301 static int
302 can_software_single_step (void)
303 {
304 return (the_low_target.get_next_pcs != NULL);
305 }
306
307 /* True if the low target supports memory breakpoints. If so, we'll
308 have a GET_PC implementation. */
309
310 static int
311 supports_breakpoints (void)
312 {
313 return (the_low_target.get_pc != NULL);
314 }
315
316 /* Returns true if this target can support fast tracepoints. This
317 does not mean that the in-process agent has been loaded in the
318 inferior. */
319
320 static int
321 supports_fast_tracepoints (void)
322 {
323 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
324 }
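/* The four capability checks above share one pattern: a feature is
   available iff the architecture-specific `the_low_target' vector
   fills in the corresponding callback (the vector is filled in by
   each architecture's linux-*-low file).  */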
325
326 /* True if LWP is stopped in its stepping range. */
327
328 static int
329 lwp_in_step_range (struct lwp_info *lwp)
330 {
331 CORE_ADDR pc = lwp->stop_pc;
332
333 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
334 }
335
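/* A signal intercepted from the inferior but not yet re-delivered;
   entries form a LIFO chain linked through PREV (see
   enqueue_pending_signal for where entries are pushed).  */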
336 struct pending_signals
337 {
338 int signal;
339 siginfo_t info;
340 struct pending_signals *prev;
341 };
342
343 /* The read/write ends of the pipe registered as waitable file in the
344 event loop. */
345 static int linux_event_pipe[2] = { -1, -1 };
346
347 /* True if we're currently in async mode. */
348 #define target_is_async_p() (linux_event_pipe[0] != -1)
349
350 static void send_sigstop (struct lwp_info *lwp);
351
352 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit, and -1 if it is not ELF at all; set *MACHINE to the ELF machine (EM_NONE if not ELF). */
353
354 static int
355 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
356 {
357 if (header->e_ident[EI_MAG0] == ELFMAG0
358 && header->e_ident[EI_MAG1] == ELFMAG1
359 && header->e_ident[EI_MAG2] == ELFMAG2
360 && header->e_ident[EI_MAG3] == ELFMAG3)
361 {
362 *machine = header->e_machine;
363 return header->e_ident[EI_CLASS] == ELFCLASS64;
364
365 }
366 *machine = EM_NONE;
367 return -1;
368 }
369
370 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
371 file or its header cannot be read, and -1 if it cannot be opened
372 or is not an ELF file at all. */
373
374 static int
375 elf_64_file_p (const char *file, unsigned int *machine)
376 {
377 Elf64_Ehdr header;
378 int fd;
379
380 fd = open (file, O_RDONLY);
381 if (fd < 0)
382 return -1;
383
384 if (read (fd, &header, sizeof (header)) != sizeof (header))
385 {
386 close (fd);
387 return 0;
388 }
389 close (fd);
390
391 return elf_64_header_p (&header, machine);
392 }
393
394 /* Accepts an integer PID; returns true if the executable that PID
395 is running is a 64-bit ELF file. */
396
397 int
398 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
399 {
400 char file[PATH_MAX];
401
402 sprintf (file, "/proc/%d/exe", pid);
403 return elf_64_file_p (file, machine);
404 }
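/* A minimal usage sketch (illustrative only, not compiled; the PID is
   hypothetical):

     unsigned int machine;
     if (linux_pid_exe_is_elf_64_file (1234, &machine) == 1)
       {
         // The executable is 64-bit ELF; MACHINE holds its
         // e_machine value (e.g. EM_X86_64).
       }
*/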
405
406 static void
407 delete_lwp (struct lwp_info *lwp)
408 {
409 struct thread_info *thr = get_lwp_thread (lwp);
410
411 if (debug_threads)
412 debug_printf ("deleting %ld\n", lwpid_of (thr));
413
414 remove_thread (thr);
415
416 if (the_low_target.delete_thread != NULL)
417 the_low_target.delete_thread (lwp->arch_private);
418 else
419 gdb_assert (lwp->arch_private == NULL);
420
421 free (lwp);
422 }
423
424 /* Add a process to the common process list, and set its private
425 data. */
426
427 static struct process_info *
428 linux_add_process (int pid, int attached)
429 {
430 struct process_info *proc;
431
432 proc = add_process (pid, attached);
433 proc->priv = XCNEW (struct process_info_private);
434
435 if (the_low_target.new_process != NULL)
436 proc->priv->arch_private = the_low_target.new_process ();
437
438 return proc;
439 }
440
441 static CORE_ADDR get_pc (struct lwp_info *lwp);
442
443 void
444 linux_process_target::arch_setup_thread (thread_info *thread)
445 {
446 struct thread_info *saved_thread;
447
448 saved_thread = current_thread;
449 current_thread = thread;
450
451 low_arch_setup ();
452
453 current_thread = saved_thread;
454 }
455
456 int
457 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
458 int wstat)
459 {
460 client_state &cs = get_client_state ();
461 struct lwp_info *event_lwp = *orig_event_lwp;
462 int event = linux_ptrace_get_extended_event (wstat);
463 struct thread_info *event_thr = get_lwp_thread (event_lwp);
464 struct lwp_info *new_lwp;
465
466 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
467
468 /* All extended events we currently use are mid-syscall. Only
469 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
470 you have to be using PTRACE_SEIZE to get that. */
471 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
472
473 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
474 || (event == PTRACE_EVENT_CLONE))
475 {
476 ptid_t ptid;
477 unsigned long new_pid;
478 int ret, status;
479
480 /* Get the pid of the new lwp. */
481 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
482 &new_pid);
483
484 /* If we haven't already seen the new PID stop, wait for it now. */
485 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
486 {
487 /* The new child has a pending SIGSTOP. We can't affect it until it
488 hits the SIGSTOP, but we're already attached. */
489
490 ret = my_waitpid (new_pid, &status, __WALL);
491
492 if (ret == -1)
493 perror_with_name ("waiting for new child");
494 else if (ret != new_pid)
495 warning ("wait returned unexpected PID %d", ret);
496 else if (!WIFSTOPPED (status))
497 warning ("wait returned unexpected status 0x%x", status);
498 }
499
500 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
501 {
502 struct process_info *parent_proc;
503 struct process_info *child_proc;
504 struct lwp_info *child_lwp;
505 struct thread_info *child_thr;
506 struct target_desc *tdesc;
507
508 ptid = ptid_t (new_pid, new_pid, 0);
509
510 if (debug_threads)
511 {
512 debug_printf ("HEW: Got fork event from LWP %ld, "
513 "new child is %d\n",
514 ptid_of (event_thr).lwp (),
515 ptid.pid ());
516 }
517
518 /* Add the new process to the tables and clone the breakpoint
519 lists of the parent. We need to do this even if the new process
520 will be detached, since we will need the process object and the
521 breakpoints to remove any breakpoints from memory when we
522 detach, and the client side will access registers. */
523 child_proc = linux_add_process (new_pid, 0);
524 gdb_assert (child_proc != NULL);
525 child_lwp = add_lwp (ptid);
526 gdb_assert (child_lwp != NULL);
527 child_lwp->stopped = 1;
528 child_lwp->must_set_ptrace_flags = 1;
529 child_lwp->status_pending_p = 0;
530 child_thr = get_lwp_thread (child_lwp);
531 child_thr->last_resume_kind = resume_stop;
532 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
533
534 /* If we're suspending all threads, leave this one suspended
535 too. If the fork/clone parent is stepping over a breakpoint,
536 all other threads have been suspended already. Leave the
537 child suspended too. */
538 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
539 || event_lwp->bp_reinsert != 0)
540 {
541 if (debug_threads)
542 debug_printf ("HEW: leaving child suspended\n");
543 child_lwp->suspended = 1;
544 }
545
546 parent_proc = get_thread_process (event_thr);
547 child_proc->attached = parent_proc->attached;
548
549 if (event_lwp->bp_reinsert != 0
550 && can_software_single_step ()
551 && event == PTRACE_EVENT_VFORK)
552 {
553 /* If we leave single-step breakpoints there, the child will
554 hit them, so uninsert single-step breakpoints from the parent
555 (and child). Once the vfork child is done, reinsert
556 them back in the parent. */
557 uninsert_single_step_breakpoints (event_thr);
558 }
559
560 clone_all_breakpoints (child_thr, event_thr);
561
562 tdesc = allocate_target_description ();
563 copy_target_description (tdesc, parent_proc->tdesc);
564 child_proc->tdesc = tdesc;
565
566 /* Clone arch-specific process data. */
567 if (the_low_target.new_fork != NULL)
568 the_low_target.new_fork (parent_proc, child_proc);
569
570 /* Save fork info in the parent thread. */
571 if (event == PTRACE_EVENT_FORK)
572 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
573 else if (event == PTRACE_EVENT_VFORK)
574 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
575
576 event_lwp->waitstatus.value.related_pid = ptid;
577
578 /* The status_pending field contains bits denoting the
579 extended event, so when the pending event is handled,
580 the handler will look at lwp->waitstatus. */
581 event_lwp->status_pending_p = 1;
582 event_lwp->status_pending = wstat;
583
584 /* Link the threads until the parent event is passed on to
585 higher layers. */
586 event_lwp->fork_relative = child_lwp;
587 child_lwp->fork_relative = event_lwp;
588
589 /* If the parent thread is doing step-over with single-step
590 breakpoints, the list of single-step breakpoints is cloned
591 from the parent's. Remove them from the child process.
592 In case of vfork, we'll reinsert them back once vforked
593 child is done. */
594 if (event_lwp->bp_reinsert != 0
595 && can_software_single_step ())
596 {
597 /* The child process is forked and stopped, so it is safe
598 to access its memory without stopping all other threads
599 from other processes. */
600 delete_single_step_breakpoints (child_thr);
601
602 gdb_assert (has_single_step_breakpoints (event_thr));
603 gdb_assert (!has_single_step_breakpoints (child_thr));
604 }
605
606 /* Report the event. */
607 return 0;
608 }
609
610 if (debug_threads)
611 debug_printf ("HEW: Got clone event "
612 "from LWP %ld, new child is LWP %ld\n",
613 lwpid_of (event_thr), new_pid);
614
615 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
616 new_lwp = add_lwp (ptid);
617
618 /* Either we're going to immediately resume the new thread
619 or leave it stopped. linux_resume_one_lwp is a nop if it
620 thinks the thread is currently running, so set this first
621 before calling linux_resume_one_lwp. */
622 new_lwp->stopped = 1;
623
624 /* If we're suspending all threads, leave this one suspended
625 too. If the fork/clone parent is stepping over a breakpoint,
626 all other threads have been suspended already. Leave the
627 child suspended too. */
628 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
629 || event_lwp->bp_reinsert != 0)
630 new_lwp->suspended = 1;
631
632 /* Normally we will get the pending SIGSTOP. But in some cases
633 we might get another signal delivered to the group first.
634 If we do get another signal, be sure not to lose it. */
635 if (WSTOPSIG (status) != SIGSTOP)
636 {
637 new_lwp->stop_expected = 1;
638 new_lwp->status_pending_p = 1;
639 new_lwp->status_pending = status;
640 }
641 else if (cs.report_thread_events)
642 {
643 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
644 new_lwp->status_pending_p = 1;
645 new_lwp->status_pending = status;
646 }
647
648 #ifdef USE_THREAD_DB
649 thread_db_notice_clone (event_thr, ptid);
650 #endif
651
652 /* Don't report the event. */
653 return 1;
654 }
655 else if (event == PTRACE_EVENT_VFORK_DONE)
656 {
657 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
658
659 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
660 {
661 reinsert_single_step_breakpoints (event_thr);
662
663 gdb_assert (has_single_step_breakpoints (event_thr));
664 }
665
666 /* Report the event. */
667 return 0;
668 }
669 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
670 {
671 struct process_info *proc;
672 std::vector<int> syscalls_to_catch;
673 ptid_t event_ptid;
674 pid_t event_pid;
675
676 if (debug_threads)
677 {
678 debug_printf ("HEW: Got exec event from LWP %ld\n",
679 lwpid_of (event_thr));
680 }
681
682 /* Get the event ptid. */
683 event_ptid = ptid_of (event_thr);
684 event_pid = event_ptid.pid ();
685
686 /* Save the syscall list from the execing process. */
687 proc = get_thread_process (event_thr);
688 syscalls_to_catch = std::move (proc->syscalls_to_catch);
689
690 /* Delete the execing process and all its threads. */
691 mourn (proc);
692 current_thread = NULL;
693
694 /* Create a new process/lwp/thread. */
695 proc = linux_add_process (event_pid, 0);
696 event_lwp = add_lwp (event_ptid);
697 event_thr = get_lwp_thread (event_lwp);
698 gdb_assert (current_thread == event_thr);
699 arch_setup_thread (event_thr);
700
701 /* Set the event status. */
702 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
703 event_lwp->waitstatus.value.execd_pathname
704 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
705
706 /* Mark the exec status as pending. */
707 event_lwp->stopped = 1;
708 event_lwp->status_pending_p = 1;
709 event_lwp->status_pending = wstat;
710 event_thr->last_resume_kind = resume_continue;
711 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
712
713 /* Update syscall state in the new lwp, effectively mid-syscall too. */
714 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
715
716 /* Restore the list of syscalls to catch. Don't rely on the client, which is free
717 to avoid sending a new list when the architecture doesn't change.
718 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
719 proc->syscalls_to_catch = std::move (syscalls_to_catch);
720
721 /* Report the event. */
722 *orig_event_lwp = event_lwp;
723 return 0;
724 }
725
726 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
727 }
728
729 /* Return the PC as read from the regcache of LWP, without any
730 adjustment. */
731
732 static CORE_ADDR
733 get_pc (struct lwp_info *lwp)
734 {
735 struct thread_info *saved_thread;
736 struct regcache *regcache;
737 CORE_ADDR pc;
738
739 if (the_low_target.get_pc == NULL)
740 return 0;
741
742 saved_thread = current_thread;
743 current_thread = get_lwp_thread (lwp);
744
745 regcache = get_thread_regcache (current_thread, 1);
746 pc = (*the_low_target.get_pc) (regcache);
747
748 if (debug_threads)
749 debug_printf ("pc is 0x%lx\n", (long) pc);
750
751 current_thread = saved_thread;
752 return pc;
753 }
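/* Note the save/restore of current_thread above: regcache accessors
   such as get_thread_regcache operate on the global current thread,
   so helpers that inspect another LWP's registers must temporarily
   switch to that LWP's thread.  The same pattern appears in
   get_syscall_trapinfo and save_stop_reason below.  */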
754
755 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
756 Fill *SYSNO with the syscall nr trapped. */
757
758 static void
759 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
760 {
761 struct thread_info *saved_thread;
762 struct regcache *regcache;
763
764 if (the_low_target.get_syscall_trapinfo == NULL)
765 {
766 /* If we cannot get the syscall trapinfo, report an unknown
767 system call number. */
768 *sysno = UNKNOWN_SYSCALL;
769 return;
770 }
771
772 saved_thread = current_thread;
773 current_thread = get_lwp_thread (lwp);
774
775 regcache = get_thread_regcache (current_thread, 1);
776 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
777
778 if (debug_threads)
779 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
780
781 current_thread = saved_thread;
782 }
783
784 static int check_stopped_by_watchpoint (struct lwp_info *child);
785
786 /* Called when the LWP stopped for a signal/trap. If it stopped for a
787 trap check what caused it (breakpoint, watchpoint, trace, etc.),
788 and save the result in the LWP's stop_reason field. If it stopped
789 for a breakpoint, decrement the PC if necessary on the lwp's
790 architecture. Returns true if we now have the LWP's stop PC. */
791
792 static int
793 save_stop_reason (struct lwp_info *lwp)
794 {
795 CORE_ADDR pc;
796 CORE_ADDR sw_breakpoint_pc;
797 struct thread_info *saved_thread;
798 #if USE_SIGTRAP_SIGINFO
799 siginfo_t siginfo;
800 #endif
801
802 if (the_low_target.get_pc == NULL)
803 return 0;
804
805 pc = get_pc (lwp);
806 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
807
808 /* breakpoint_at reads from the current thread. */
809 saved_thread = current_thread;
810 current_thread = get_lwp_thread (lwp);
811
812 #if USE_SIGTRAP_SIGINFO
813 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
814 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
815 {
816 if (siginfo.si_signo == SIGTRAP)
817 {
818 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
819 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
820 {
821 /* The si_code is ambiguous on this arch -- check debug
822 registers. */
823 if (!check_stopped_by_watchpoint (lwp))
824 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
825 }
826 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
827 {
828 /* If we determine the LWP stopped for a SW breakpoint,
829 trust it. Particularly don't check watchpoint
830 registers, because at least on s390, we'd find
831 stopped-by-watchpoint as long as there's a watchpoint
832 set. */
833 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
834 }
835 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
836 {
837 /* This can indicate either a hardware breakpoint or
838 hardware watchpoint. Check debug registers. */
839 if (!check_stopped_by_watchpoint (lwp))
840 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
841 }
842 else if (siginfo.si_code == TRAP_TRACE)
843 {
844 /* We may have single stepped an instruction that
845 triggered a watchpoint. In that case, on some
846 architectures (such as x86), instead of TRAP_HWBKPT,
847 si_code indicates TRAP_TRACE, and we need to check
848 the debug registers separately. */
849 if (!check_stopped_by_watchpoint (lwp))
850 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
851 }
852 }
853 }
854 #else
855 /* We may have just stepped a breakpoint instruction. E.g., in
856 non-stop mode, GDB first tells thread A to step a range, and
857 then the user inserts a breakpoint inside the range. In that
858 case we need to report the breakpoint PC. */
859 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
860 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
861 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
862
863 if (hardware_breakpoint_inserted_here (pc))
864 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
865
866 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
867 check_stopped_by_watchpoint (lwp);
868 #endif
869
870 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
871 {
872 if (debug_threads)
873 {
874 struct thread_info *thr = get_lwp_thread (lwp);
875
876 debug_printf ("CSBB: %s stopped by software breakpoint\n",
877 target_pid_to_str (ptid_of (thr)));
878 }
879
880 /* Back up the PC if necessary. */
881 if (pc != sw_breakpoint_pc)
882 {
883 struct regcache *regcache
884 = get_thread_regcache (current_thread, 1);
885 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
886 }
887
888 /* Update this so we record the correct stop PC below. */
889 pc = sw_breakpoint_pc;
890 }
891 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
892 {
893 if (debug_threads)
894 {
895 struct thread_info *thr = get_lwp_thread (lwp);
896
897 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
898 target_pid_to_str (ptid_of (thr)));
899 }
900 }
901 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
902 {
903 if (debug_threads)
904 {
905 struct thread_info *thr = get_lwp_thread (lwp);
906
907 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
908 target_pid_to_str (ptid_of (thr)));
909 }
910 }
911 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
912 {
913 if (debug_threads)
914 {
915 struct thread_info *thr = get_lwp_thread (lwp);
916
917 debug_printf ("CSBB: %s stopped by trace\n",
918 target_pid_to_str (ptid_of (thr)));
919 }
920 }
921
922 lwp->stop_pc = pc;
923 current_thread = saved_thread;
924 return 1;
925 }
926
927 static struct lwp_info *
928 add_lwp (ptid_t ptid)
929 {
930 struct lwp_info *lwp;
931
932 lwp = XCNEW (struct lwp_info);
933
934 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
935
936 lwp->thread = add_thread (ptid, lwp);
937
938 if (the_low_target.new_thread != NULL)
939 the_low_target.new_thread (lwp);
940
941 return lwp;
942 }
943
944 /* Callback to be used when calling fork_inferior, responsible for
945 actually initiating the tracing of the inferior. */
946
947 static void
948 linux_ptrace_fun ()
949 {
950 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
951 (PTRACE_TYPE_ARG4) 0) < 0)
952 trace_start_error_with_name ("ptrace");
953
954 if (setpgid (0, 0) < 0)
955 trace_start_error_with_name ("setpgid");
956
957 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
958 stdout to stderr so that inferior i/o doesn't corrupt the connection.
959 Also, redirect stdin to /dev/null. */
960 if (remote_connection_is_stdio ())
961 {
962 if (close (0) < 0)
963 trace_start_error_with_name ("close");
964 if (open ("/dev/null", O_RDONLY) < 0)
965 trace_start_error_with_name ("open");
966 if (dup2 (2, 1) < 0)
967 trace_start_error_with_name ("dup2");
968 if (write (2, "stdin/stdout redirected\n",
969 sizeof ("stdin/stdout redirected\n") - 1) < 0)
970 {
971 /* Errors ignored. */;
972 }
973 }
974 }
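/* Note: linux_ptrace_fun runs in the forked child, between fork and
   exec, as the trace-me callback passed to fork_inferior (see
   create_inferior below); that is why failures are reported via
   trace_start_error_with_name rather than ordinary error paths.  */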
975
976 /* Start an inferior process and return its pid.
977 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
978 are its arguments. */
979
980 int
981 linux_process_target::create_inferior (const char *program,
982 const std::vector<char *> &program_args)
983 {
984 client_state &cs = get_client_state ();
985 struct lwp_info *new_lwp;
986 int pid;
987 ptid_t ptid;
988
989 {
990 maybe_disable_address_space_randomization restore_personality
991 (cs.disable_randomization);
992 std::string str_program_args = stringify_argv (program_args);
993
994 pid = fork_inferior (program,
995 str_program_args.c_str (),
996 get_environ ()->envp (), linux_ptrace_fun,
997 NULL, NULL, NULL, NULL);
998 }
999
1000 linux_add_process (pid, 0);
1001
1002 ptid = ptid_t (pid, pid, 0);
1003 new_lwp = add_lwp (ptid);
1004 new_lwp->must_set_ptrace_flags = 1;
1005
1006 post_fork_inferior (pid, program);
1007
1008 return pid;
1009 }
1010
1011 /* Implement the post_create_inferior target_ops method. */
1012
1013 void
1014 linux_process_target::post_create_inferior ()
1015 {
1016 struct lwp_info *lwp = get_thread_lwp (current_thread);
1017
1018 low_arch_setup ();
1019
1020 if (lwp->must_set_ptrace_flags)
1021 {
1022 struct process_info *proc = current_process ();
1023 int options = linux_low_ptrace_options (proc->attached);
1024
1025 linux_enable_event_reporting (lwpid_of (current_thread), options);
1026 lwp->must_set_ptrace_flags = 0;
1027 }
1028 }
1029
1030 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1031 error. */
1032
1033 int
1034 linux_attach_lwp (ptid_t ptid)
1035 {
1036 struct lwp_info *new_lwp;
1037 int lwpid = ptid.lwp ();
1038
1039 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1040 != 0)
1041 return errno;
1042
1043 new_lwp = add_lwp (ptid);
1044
1045 /* We need to wait for SIGSTOP before being able to make the next
1046 ptrace call on this LWP. */
1047 new_lwp->must_set_ptrace_flags = 1;
1048
1049 if (linux_proc_pid_is_stopped (lwpid))
1050 {
1051 if (debug_threads)
1052 debug_printf ("Attached to a stopped process\n");
1053
1054 /* The process is definitely stopped. It is in a job control
1055 stop, unless the kernel predates the TASK_STOPPED /
1056 TASK_TRACED distinction, in which case it might be in a
1057 ptrace stop. Make sure it is in a ptrace stop; from there we
1058 can kill it, signal it, et cetera.
1059
1060 First make sure there is a pending SIGSTOP. Since we are
1061 already attached, the process cannot transition from stopped
1062 to running without a PTRACE_CONT; so we know this signal will
1063 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1064 probably already in the queue (unless this kernel is old
1065 enough to use TASK_STOPPED for ptrace stops); but since
1066 SIGSTOP is not an RT signal, it can only be queued once. */
1067 kill_lwp (lwpid, SIGSTOP);
1068
1069 /* Finally, resume the stopped process. This will deliver the
1070 SIGSTOP (or a higher priority signal, just like normal
1071 PTRACE_ATTACH), which we'll catch later on. */
1072 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1073 }
1074
1075 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1076 brings it to a halt.
1077
1078 There are several cases to consider here:
1079
1080 1) gdbserver has already attached to the process and is being notified
1081 of a new thread that is being created.
1082 In this case we should ignore that SIGSTOP and resume the
1083 process. This is handled below by setting stop_expected = 1,
1084 and the fact that add_thread sets last_resume_kind ==
1085 resume_continue.
1086
1087 2) This is the first thread (the process thread), and we're attaching
1088 to it via attach_inferior.
1089 In this case we want the process thread to stop.
1090 This is handled by having linux_attach set last_resume_kind ==
1091 resume_stop after we return.
1092
1093 If the pid we are attaching to is also the tgid, we attach to and
1094 stop all the existing threads. Otherwise, we attach to pid and
1095 ignore any other threads in the same group as this pid.
1096
1097 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1098 existing threads.
1099 In this case we want the thread to stop.
1100 FIXME: This case is currently not properly handled.
1101 We should wait for the SIGSTOP but don't. Things work apparently
1102 because enough time passes between when we ptrace (ATTACH) and when
1103 gdb makes the next ptrace call on the thread.
1104
1105 On the other hand, if we are currently trying to stop all threads, we
1106 should treat the new thread as if we had sent it a SIGSTOP. This works
1107 because we are guaranteed that the add_lwp call above added us to the
1108 end of the list, and so the new thread has not yet reached
1109 wait_for_sigstop (but will). */
1110 new_lwp->stop_expected = 1;
1111
1112 return 0;
1113 }
1114
1115 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1116 already attached. Returns true if a new LWP is found, false
1117 otherwise. */
1118
1119 static int
1120 attach_proc_task_lwp_callback (ptid_t ptid)
1121 {
1122 /* Is this a new thread? */
1123 if (find_thread_ptid (ptid) == NULL)
1124 {
1125 int lwpid = ptid.lwp ();
1126 int err;
1127
1128 if (debug_threads)
1129 debug_printf ("Found new lwp %d\n", lwpid);
1130
1131 err = linux_attach_lwp (ptid);
1132
1133 /* Be quiet if we simply raced with the thread exiting. EPERM
1134 is returned if the thread's task still exists, and is marked
1135 as exited or zombie, as well as other conditions, so in that
1136 case, confirm the status in /proc/PID/status. */
1137 if (err == ESRCH
1138 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1139 {
1140 if (debug_threads)
1141 {
1142 debug_printf ("Cannot attach to lwp %d: "
1143 "thread is gone (%d: %s)\n",
1144 lwpid, err, safe_strerror (err));
1145 }
1146 }
1147 else if (err != 0)
1148 {
1149 std::string reason
1150 = linux_ptrace_attach_fail_reason_string (ptid, err);
1151
1152 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1153 }
1154
1155 return 1;
1156 }
1157 return 0;
1158 }
1159
1160 static void async_file_mark (void);
1161
1162 /* Attach to PID. If PID is the tgid, attach to it and all
1163 of its threads. */
1164
1165 int
1166 linux_process_target::attach (unsigned long pid)
1167 {
1168 struct process_info *proc;
1169 struct thread_info *initial_thread;
1170 ptid_t ptid = ptid_t (pid, pid, 0);
1171 int err;
1172
1173 proc = linux_add_process (pid, 1);
1174
1175 /* Attach to PID. We will check for other threads
1176 soon. */
1177 err = linux_attach_lwp (ptid);
1178 if (err != 0)
1179 {
1180 remove_process (proc);
1181
1182 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1183 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1184 }
1185
1186 /* Don't ignore the initial SIGSTOP if we just attached to this
1187 process. It will be collected by wait shortly. */
1188 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
1189 initial_thread->last_resume_kind = resume_stop;
1190
1191 /* We must attach to every LWP. If /proc is mounted, use that to
1192 find them now. On the one hand, the inferior may be using raw
1193 clone instead of using pthreads. On the other hand, even if it
1194 is using pthreads, GDB may not be connected yet (thread_db needs
1195 to do symbol lookups, through qSymbol). Also, thread_db walks
1196 structures in the inferior's address space to find the list of
1197 threads/LWPs, and those structures may well be corrupted. Note
1198 that once thread_db is loaded, we'll still use it to list threads
1199 and associate pthread info with each LWP. */
1200 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1201
1202 /* GDB will shortly read the xml target description for this
1203 process, to figure out the process' architecture. But the target
1204 description is only filled in when the first process/thread in
1205 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1206 that now, otherwise, if GDB is fast enough, it could read the
1207 target description _before_ that initial stop. */
1208 if (non_stop)
1209 {
1210 struct lwp_info *lwp;
1211 int wstat, lwpid;
1212 ptid_t pid_ptid = ptid_t (pid);
1213
1214 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1215 gdb_assert (lwpid > 0);
1216
1217 lwp = find_lwp_pid (ptid_t (lwpid));
1218
1219 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1220 {
1221 lwp->status_pending_p = 1;
1222 lwp->status_pending = wstat;
1223 }
1224
1225 initial_thread->last_resume_kind = resume_continue;
1226
1227 async_file_mark ();
1228
1229 gdb_assert (proc->tdesc != NULL);
1230 }
1231
1232 return 0;
1233 }
1234
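/* Return true if PID has exactly one known thread, i.e. find_thread
   below never sees a second thread of that process.  */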
1235 static int
1236 last_thread_of_process_p (int pid)
1237 {
1238 bool seen_one = false;
1239
1240 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1241 {
1242 if (!seen_one)
1243 {
1244 /* This is the first thread of this process we see. */
1245 seen_one = true;
1246 return false;
1247 }
1248 else
1249 {
1250 /* This is the second thread of this process we see. */
1251 return true;
1252 }
1253 });
1254
1255 return thread == NULL;
1256 }
1257
1258 /* Kill LWP. */
1259
1260 static void
1261 linux_kill_one_lwp (struct lwp_info *lwp)
1262 {
1263 struct thread_info *thr = get_lwp_thread (lwp);
1264 int pid = lwpid_of (thr);
1265
1266 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1267 there is no signal context, and ptrace(PTRACE_KILL) (or
1268 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1269 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1270 alternative is to kill with SIGKILL. We only need one SIGKILL
1271 per process, not one for each thread.  But since we still
1272 support debugging programs using raw clone without CLONE_THREAD,
1273 we send one for each thread. For years, we used PTRACE_KILL
1274 only, so we're being a bit paranoid about some old kernels where
1275 PTRACE_KILL might work better (dubious if there are any such, but
1276 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1277 second, and so we're fine everywhere. */
1278
1279 errno = 0;
1280 kill_lwp (pid, SIGKILL);
1281 if (debug_threads)
1282 {
1283 int save_errno = errno;
1284
1285 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1286 target_pid_to_str (ptid_of (thr)),
1287 save_errno ? safe_strerror (save_errno) : "OK");
1288 }
1289
1290 errno = 0;
1291 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1292 if (debug_threads)
1293 {
1294 int save_errno = errno;
1295
1296 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1297 target_pid_to_str (ptid_of (thr)),
1298 save_errno ? safe_strerror (save_errno) : "OK");
1299 }
1300 }
1301
1302 /* Kill LWP and wait for it to die. */
1303
1304 static void
1305 kill_wait_lwp (struct lwp_info *lwp)
1306 {
1307 struct thread_info *thr = get_lwp_thread (lwp);
1308 int pid = ptid_of (thr).pid ();
1309 int lwpid = ptid_of (thr).lwp ();
1310 int wstat;
1311 int res;
1312
1313 if (debug_threads)
1314 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1315
1316 do
1317 {
1318 linux_kill_one_lwp (lwp);
1319
1320 /* Make sure it died. Notes:
1321
1322 - The loop is most likely unnecessary.
1323
1324 - We don't use wait_for_event as that could delete lwps
1325 while we're iterating over them. We're not interested in
1326 any pending status at this point, only in making sure all
1327 wait status on the kernel side are collected until the
1328 process is reaped.
1329
1330 - We don't use __WALL here as the __WALL emulation relies on
1331 SIGCHLD, and killing a stopped process doesn't generate
1332 one, nor an exit status.
1333 */
1334 res = my_waitpid (lwpid, &wstat, 0);
1335 if (res == -1 && errno == ECHILD)
1336 res = my_waitpid (lwpid, &wstat, __WCLONE);
1337 } while (res > 0 && WIFSTOPPED (wstat));
1338
1339 /* Even if it was stopped, the child may have already disappeared.
1340 E.g., if it was killed by SIGKILL. */
1341 if (res < 0 && errno != ECHILD)
1342 perror_with_name ("kill_wait_lwp");
1343 }
1344
1345 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1346 except the leader. */
1347
1348 static void
1349 kill_one_lwp_callback (thread_info *thread, int pid)
1350 {
1351 struct lwp_info *lwp = get_thread_lwp (thread);
1352
1353 /* We avoid killing the first thread here, because of a Linux kernel (at
1354 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1355 the children get a chance to be reaped, it will remain a zombie
1356 forever. */
1357
1358 if (lwpid_of (thread) == pid)
1359 {
1360 if (debug_threads)
1361 debug_printf ("lkop: is last of process %s\n",
1362 target_pid_to_str (thread->id));
1363 return;
1364 }
1365
1366 kill_wait_lwp (lwp);
1367 }
1368
1369 int
1370 linux_process_target::kill (process_info *process)
1371 {
1372 int pid = process->pid;
1373
1374 /* If we're killing a running inferior, make sure it is stopped
1375 first, as PTRACE_KILL will not work otherwise. */
1376 stop_all_lwps (0, NULL);
1377
1378 for_each_thread (pid, [&] (thread_info *thread)
1379 {
1380 kill_one_lwp_callback (thread, pid);
1381 });
1382
1383 /* See the comment in linux_kill_one_lwp. We did not kill the first
1384 thread in the list, so do so now. */
1385 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1386
1387 if (lwp == NULL)
1388 {
1389 if (debug_threads)
1390 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1391 pid);
1392 }
1393 else
1394 kill_wait_lwp (lwp);
1395
1396 mourn (process);
1397
1398 /* Since we presently can only stop all lwps of all processes, we
1399 need to unstop lwps of other processes. */
1400 unstop_all_lwps (0, NULL);
1401 return 0;
1402 }
1403
1404 /* Get pending signal of THREAD, for detaching purposes. This is the
1405 signal the thread last stopped for, which we need to deliver to the
1406 thread when detaching; otherwise, it'd be suppressed/lost. */
1407
1408 static int
1409 get_detach_signal (struct thread_info *thread)
1410 {
1411 client_state &cs = get_client_state ();
1412 enum gdb_signal signo = GDB_SIGNAL_0;
1413 int status;
1414 struct lwp_info *lp = get_thread_lwp (thread);
1415
1416 if (lp->status_pending_p)
1417 status = lp->status_pending;
1418 else
1419 {
1420 /* If the thread had been suspended by gdbserver, and it stopped
1421 cleanly, then it'll have stopped with SIGSTOP. But we don't
1422 want to deliver that SIGSTOP. */
1423 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1424 || thread->last_status.value.sig == GDB_SIGNAL_0)
1425 return 0;
1426
1427 /* Otherwise, we may need to deliver the signal we
1428 intercepted. */
1429 status = lp->last_status;
1430 }
1431
1432 if (!WIFSTOPPED (status))
1433 {
1434 if (debug_threads)
1435 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1436 target_pid_to_str (ptid_of (thread)));
1437 return 0;
1438 }
1439
1440 /* Extended wait statuses aren't real SIGTRAPs. */
1441 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1442 {
1443 if (debug_threads)
1444 debug_printf ("GPS: lwp %s had stopped with extended "
1445 "status: no pending signal\n",
1446 target_pid_to_str (ptid_of (thread)));
1447 return 0;
1448 }
1449
1450 signo = gdb_signal_from_host (WSTOPSIG (status));
1451
1452 if (cs.program_signals_p && !cs.program_signals[signo])
1453 {
1454 if (debug_threads)
1455 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1456 target_pid_to_str (ptid_of (thread)),
1457 gdb_signal_to_string (signo));
1458 return 0;
1459 }
1460 else if (!cs.program_signals_p
1461 /* If we have no way to know which signals GDB does not
1462 want to have passed to the program, assume
1463 SIGTRAP/SIGINT, which is GDB's default. */
1464 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1465 {
1466 if (debug_threads)
1467 debug_printf ("GPS: lwp %s had signal %s, "
1468 "but we don't know if we should pass it. "
1469 "Default to not.\n",
1470 target_pid_to_str (ptid_of (thread)),
1471 gdb_signal_to_string (signo));
1472 return 0;
1473 }
1474 else
1475 {
1476 if (debug_threads)
1477 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1478 target_pid_to_str (ptid_of (thread)),
1479 gdb_signal_to_string (signo));
1480
1481 return WSTOPSIG (status);
1482 }
1483 }
1484
1485 /* Detach from LWP. */
1486
1487 static void
1488 linux_detach_one_lwp (struct lwp_info *lwp)
1489 {
1490 struct thread_info *thread = get_lwp_thread (lwp);
1491 int sig;
1492 int lwpid;
1493
1494 /* If there is a pending SIGSTOP, get rid of it. */
1495 if (lwp->stop_expected)
1496 {
1497 if (debug_threads)
1498 debug_printf ("Sending SIGCONT to %s\n",
1499 target_pid_to_str (ptid_of (thread)));
1500
1501 kill_lwp (lwpid_of (thread), SIGCONT);
1502 lwp->stop_expected = 0;
1503 }
1504
1505 /* Pass on any pending signal for this thread. */
1506 sig = get_detach_signal (thread);
1507
1508 /* Preparing to resume may try to write registers, and fail if the
1509 lwp is zombie. If that happens, ignore the error. We'll handle
1510 it below, when detach fails with ESRCH. */
1511 try
1512 {
1513 /* Flush any pending changes to the process's registers. */
1514 regcache_invalidate_thread (thread);
1515
1516 /* Finally, let it resume. */
1517 if (the_low_target.prepare_to_resume != NULL)
1518 the_low_target.prepare_to_resume (lwp);
1519 }
1520 catch (const gdb_exception_error &ex)
1521 {
1522 if (!check_ptrace_stopped_lwp_gone (lwp))
1523 throw;
1524 }
1525
1526 lwpid = lwpid_of (thread);
1527 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1528 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1529 {
1530 int save_errno = errno;
1531
1532 /* We know the thread exists, so ESRCH must mean the lwp is
1533 zombie. This can happen if one of the already-detached
1534 threads exits the whole thread group. In that case we're
1535 still attached, and must reap the lwp. */
1536 if (save_errno == ESRCH)
1537 {
1538 int ret, status;
1539
1540 ret = my_waitpid (lwpid, &status, __WALL);
1541 if (ret == -1)
1542 {
1543 warning (_("Couldn't reap LWP %d while detaching: %s"),
1544 lwpid, safe_strerror (errno));
1545 }
1546 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1547 {
1548 warning (_("Reaping LWP %d while detaching "
1549 "returned unexpected status 0x%x"),
1550 lwpid, status);
1551 }
1552 }
1553 else
1554 {
1555 error (_("Can't detach %s: %s"),
1556 target_pid_to_str (ptid_of (thread)),
1557 safe_strerror (save_errno));
1558 }
1559 }
1560 else if (debug_threads)
1561 {
1562 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1563 target_pid_to_str (ptid_of (thread)),
1564 strsignal (sig));
1565 }
1566
1567 delete_lwp (lwp);
1568 }
1569
1570 /* Callback for for_each_thread. Detaches from non-leader threads of a
1571 given process. */
1572
1573 static void
1574 linux_detach_lwp_callback (thread_info *thread)
1575 {
1576 /* We don't actually detach from the thread group leader just yet.
1577 If the thread group exits, we must reap the zombie clone lwps
1578 before we're able to reap the leader. */
1579 if (thread->id.pid () == thread->id.lwp ())
1580 return;
1581
1582 lwp_info *lwp = get_thread_lwp (thread);
1583 linux_detach_one_lwp (lwp);
1584 }
1585
1586 int
1587 linux_process_target::detach (process_info *process)
1588 {
1589 struct lwp_info *main_lwp;
1590
1591 /* If there's a step over already in progress, let it finish first;
1592 otherwise nesting a stabilize_threads operation on top gets really
1593 messy. */
1594 complete_ongoing_step_over ();
1595
1596 /* Stop all threads before detaching. First, ptrace requires that
1597 the thread is stopped to successfully detach. Second, thread_db
1598 may need to uninstall thread event breakpoints from memory, which
1599 only works with a stopped process anyway. */
1600 stop_all_lwps (0, NULL);
1601
1602 #ifdef USE_THREAD_DB
1603 thread_db_detach (process);
1604 #endif
1605
1606 /* Stabilize threads (move out of jump pads). */
1607 target_stabilize_threads ();
1608
1609 /* Detach from the clone lwps first. If the thread group exits just
1610 while we're detaching, we must reap the clone lwps before we're
1611 able to reap the leader. */
1612 for_each_thread (process->pid, linux_detach_lwp_callback);
1613
1614 main_lwp = find_lwp_pid (ptid_t (process->pid));
1615 linux_detach_one_lwp (main_lwp);
1616
1617 mourn (process);
1618
1619 /* Since we presently can only stop all lwps of all processes, we
1620 need to unstop lwps of other processes. */
1621 unstop_all_lwps (0, NULL);
1622 return 0;
1623 }
1624
1625 /* Remove all LWPs that belong to process PROC from the lwp list. */
1626
1627 void
1628 linux_process_target::mourn (process_info *process)
1629 {
1630 struct process_info_private *priv;
1631
1632 #ifdef USE_THREAD_DB
1633 thread_db_mourn (process);
1634 #endif
1635
1636 for_each_thread (process->pid, [] (thread_info *thread)
1637 {
1638 delete_lwp (get_thread_lwp (thread));
1639 });
1640
1641 /* Free all private data. */
1642 priv = process->priv;
1643 if (the_low_target.delete_process != NULL)
1644 the_low_target.delete_process (priv->arch_private);
1645 else
1646 gdb_assert (priv->arch_private == NULL);
1647 free (priv);
1648 process->priv = NULL;
1649
1650 remove_process (process);
1651 }
1652
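/* Wait for process PID to exit, reaping wait statuses until waitpid
   reports an exit or termination by signal, or until there are no
   children left (ECHILD).  */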
1653 void
1654 linux_process_target::join (int pid)
1655 {
1656 int status, ret;
1657
1658 do {
1659 ret = my_waitpid (pid, &status, 0);
1660 if (WIFEXITED (status) || WIFSIGNALED (status))
1661 break;
1662 } while (ret != -1 || errno != ECHILD);
1663 }
1664
1665 /* Return true if the given thread is still alive. */
1666
1667 bool
1668 linux_process_target::thread_alive (ptid_t ptid)
1669 {
1670 struct lwp_info *lwp = find_lwp_pid (ptid);
1671
1672 /* We assume we always know if a thread exits. If a whole process
1673 exited but we still haven't been able to report it to GDB, we'll
1674 hold on to the last lwp of the dead process. */
1675 if (lwp != NULL)
1676 return !lwp_is_marked_dead (lwp);
1677 else
1678 return 0;
1679 }
1680
1681 /* Return 1 if this lwp still has an interesting status pending.  If
1682 not (e.g., it had stopped for a breakpoint that is gone), return
1683 0. */
1684
1685 static int
1686 thread_still_has_status_pending_p (struct thread_info *thread)
1687 {
1688 struct lwp_info *lp = get_thread_lwp (thread);
1689
1690 if (!lp->status_pending_p)
1691 return 0;
1692
1693 if (thread->last_resume_kind != resume_stop
1694 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1695 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1696 {
1697 struct thread_info *saved_thread;
1698 CORE_ADDR pc;
1699 int discard = 0;
1700
1701 gdb_assert (lp->last_status != 0);
1702
1703 pc = get_pc (lp);
1704
1705 saved_thread = current_thread;
1706 current_thread = thread;
1707
1708 if (pc != lp->stop_pc)
1709 {
1710 if (debug_threads)
1711 debug_printf ("PC of %ld changed\n",
1712 lwpid_of (thread));
1713 discard = 1;
1714 }
1715
1716 #if !USE_SIGTRAP_SIGINFO
1717 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1718 && !(*the_low_target.breakpoint_at) (pc))
1719 {
1720 if (debug_threads)
1721 debug_printf ("previous SW breakpoint of %ld gone\n",
1722 lwpid_of (thread));
1723 discard = 1;
1724 }
1725 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1726 && !hardware_breakpoint_inserted_here (pc))
1727 {
1728 if (debug_threads)
1729 debug_printf ("previous HW breakpoint of %ld gone\n",
1730 lwpid_of (thread));
1731 discard = 1;
1732 }
1733 #endif
1734
1735 current_thread = saved_thread;
1736
1737 if (discard)
1738 {
1739 if (debug_threads)
1740 debug_printf ("discarding pending breakpoint status\n");
1741 lp->status_pending_p = 0;
1742 return 0;
1743 }
1744 }
1745
1746 return 1;
1747 }
1748
1749 /* Returns true if LWP is resumed from the client's perspective. */
1750
1751 static int
1752 lwp_resumed (struct lwp_info *lwp)
1753 {
1754 struct thread_info *thread = get_lwp_thread (lwp);
1755
1756 if (thread->last_resume_kind != resume_stop)
1757 return 1;
1758
1759 /* Did gdb send us a `vCont;t', but we haven't reported the
1760 corresponding stop to gdb yet? If so, the thread is still
1761 resumed/running from gdb's perspective. */
1762 if (thread->last_resume_kind == resume_stop
1763 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1764 return 1;
1765
1766 return 0;
1767 }
1768
1769 /* Return true if this lwp has an interesting status pending. */
1770 static bool
1771 status_pending_p_callback (thread_info *thread, ptid_t ptid)
1772 {
1773 struct lwp_info *lp = get_thread_lwp (thread);
1774
1775 /* Check if we're only interested in events from a specific process
1776 or a specific LWP. */
1777 if (!thread->id.matches (ptid))
1778 return 0;
1779
1780 if (!lwp_resumed (lp))
1781 return 0;
1782
1783 if (lp->status_pending_p
1784 && !thread_still_has_status_pending_p (thread))
1785 {
1786 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1787 return 0;
1788 }
1789
1790 return lp->status_pending_p;
1791 }
1792
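/* Find the LWP whose id matches PTID's lwp field, or PTID's pid field
   when the lwp field is zero; return NULL if there is none.  */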
1793 struct lwp_info *
1794 find_lwp_pid (ptid_t ptid)
1795 {
1796 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1797 {
1798 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1799 return thr_arg->id.lwp () == lwp;
1800 });
1801
1802 if (thread == NULL)
1803 return NULL;
1804
1805 return get_thread_lwp (thread);
1806 }
1807
1808 /* Return the number of known LWPs in the tgid given by PID. */
1809
1810 static int
1811 num_lwps (int pid)
1812 {
1813 int count = 0;
1814
1815 for_each_thread (pid, [&] (thread_info *thread)
1816 {
1817 count++;
1818 });
1819
1820 return count;
1821 }
1822
1823 /* See nat/linux-nat.h. */
1824
1825 struct lwp_info *
1826 iterate_over_lwps (ptid_t filter,
1827 gdb::function_view<iterate_over_lwps_ftype> callback)
1828 {
1829 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1830 {
1831 lwp_info *lwp = get_thread_lwp (thr_arg);
1832
1833 return callback (lwp);
1834 });
1835
1836 if (thread == NULL)
1837 return NULL;
1838
1839 return get_thread_lwp (thread);
1840 }
1841
1842 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1843 their exits until all other threads in the group have exited. */
1844
1845 static void
1846 check_zombie_leaders (void)
1847 {
1848 for_each_process ([] (process_info *proc) {
1849 pid_t leader_pid = pid_of (proc);
1850 struct lwp_info *leader_lp;
1851
1852 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1853
1854 if (debug_threads)
1855 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1856 "num_lwps=%d, zombie=%d\n",
1857 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1858 linux_proc_pid_is_zombie (leader_pid));
1859
1860 if (leader_lp != NULL && !leader_lp->stopped
1861 /* Check if there are other threads in the group, as we may
1862 have raced with the inferior simply exiting. */
1863 && !last_thread_of_process_p (leader_pid)
1864 && linux_proc_pid_is_zombie (leader_pid))
1865 {
1866 /* A leader zombie can mean one of two things:
1867
1868 - It exited, and there's an exit status pending
1869 available, or only the leader exited (not the whole
1870 program). In the latter case, we can't waitpid the
1871 leader's exit status until all other threads are gone.
1872
1873 - There are 3 or more threads in the group, and a thread
1874 other than the leader exec'd. On an exec, the Linux
1875 kernel destroys all other threads (except the execing
1876 one) in the thread group, and resets the execing thread's
1877 tid to the tgid. No exit notification is sent for the
1878 execing thread -- from the ptracer's perspective, it
1879 appears as though the execing thread just vanishes.
1880 Until we reap all other threads except the leader and the
1881 execing thread, the leader will be zombie, and the
1882 execing thread will be in `D (disk sleep)'. As soon as
1883 all other threads are reaped, the execing thread changes
1884 its tid to the tgid, and the previous (zombie) leader
1885 vanishes, giving way to the "new" leader. We could try
1886 distinguishing the exit and exec cases, by waiting once
1887 more, and seeing if something comes out, but it doesn't
1888 sound useful. The previous leader _does_ go away, and
1889 we'll re-add the new one once we see the exec event
1890 (which is just the same as what would happen if the
1891 previous leader did exit voluntarily before some other
1892 thread execs). */
1893
1894 if (debug_threads)
1895 debug_printf ("CZL: Thread group leader %d zombie "
1896 "(it exited, or another thread execd).\n",
1897 leader_pid);
1898
1899 delete_lwp (leader_lp);
1900 }
1901 });
1902 }
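
/* Illustrative sketch, not part of the build: the zombie test above
   ultimately reduces to reading the "State:" line of /proc/PID/status,
   which reads `Z (zombie)' for zombies. Assuming
   linux_proc_pid_is_zombie (nat/linux-procfs.c) works roughly along
   these lines (<stdio.h>/<string.h>, error handling trimmed):

     static int
     pid_is_zombie_sketch (pid_t pid)
     {
       char path[64], line[128];
       int zombie = 0;

       snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
       FILE *f = fopen (path, "r");
       if (f == NULL)
         return 0;
       while (fgets (line, sizeof (line), f) != NULL)
         if (strncmp (line, "State:", 6) == 0)
           {
             // The state letter follows the tab: "State:\tZ (zombie)".
             zombie = (strchr (line, 'Z') != NULL);
             break;
           }
       fclose (f);
       return zombie;
     }
*/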
1903
1904 /* Callback for `find_thread'. Returns true if THREAD's LWP is not
1905 stopped, so find_thread returns the first non-stopped LWP. */
1906
1907 static bool
1908 not_stopped_callback (thread_info *thread, ptid_t filter)
1909 {
1910 if (!thread->id.matches (filter))
1911 return false;
1912
1913 lwp_info *lwp = get_thread_lwp (thread);
1914
1915 return !lwp->stopped;
1916 }
1917
1918 /* Increment LWP's suspend count. */
1919
1920 static void
1921 lwp_suspended_inc (struct lwp_info *lwp)
1922 {
1923 lwp->suspended++;
1924
1925 if (debug_threads && lwp->suspended > 4)
1926 {
1927 struct thread_info *thread = get_lwp_thread (lwp);
1928
1929 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1930 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1931 }
1932 }
1933
1934 /* Decrement LWP's suspend count. */
1935
1936 static void
1937 lwp_suspended_decr (struct lwp_info *lwp)
1938 {
1939 lwp->suspended--;
1940
1941 if (lwp->suspended < 0)
1942 {
1943 struct thread_info *thread = get_lwp_thread (lwp);
1944
1945 internal_error (__FILE__, __LINE__,
1946 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1947 lwp->suspended);
1948 }
1949 }
1950
1951 /* This function should only be called if the LWP got a SIGTRAP.
1952
1953 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1954 event was handled, 0 otherwise. */
1955
1956 static int
1957 handle_tracepoints (struct lwp_info *lwp)
1958 {
1959 struct thread_info *tinfo = get_lwp_thread (lwp);
1960 int tpoint_related_event = 0;
1961
1962 gdb_assert (lwp->suspended == 0);
1963
1964 /* If this tracepoint hit causes a tracing stop, we'll immediately
1965 uninsert tracepoints. To do this, we temporarily pause all
1966 threads, unpatch away, and then unpause threads. We need to make
1967 sure the unpausing doesn't resume LWP too. */
1968 lwp_suspended_inc (lwp);
1969
1970 /* And we need to be sure that any all-threads-stopping doesn't try
1971 to move threads out of the jump pads, as it could deadlock the
1972 inferior (LWP could be in the jump pad, maybe even holding the
1973 lock). */
1974
1975 /* Do any necessary step collect actions. */
1976 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1977
1978 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1979
1980 /* See if we just hit a tracepoint and do its main collect
1981 actions. */
1982 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1983
1984 lwp_suspended_decr (lwp);
1985
1986 gdb_assert (lwp->suspended == 0);
1987 gdb_assert (!stabilizing_threads
1988 || (lwp->collecting_fast_tracepoint
1989 != fast_tpoint_collect_result::not_collecting));
1990
1991 if (tpoint_related_event)
1992 {
1993 if (debug_threads)
1994 debug_printf ("got a tracepoint event\n");
1995 return 1;
1996 }
1997
1998 return 0;
1999 }
2000
2001 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2002 collection status. */
2003
2004 static fast_tpoint_collect_result
2005 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2006 struct fast_tpoint_collect_status *status)
2007 {
2008 CORE_ADDR thread_area;
2009 struct thread_info *thread = get_lwp_thread (lwp);
2010
2011 if (the_low_target.get_thread_area == NULL)
2012 return fast_tpoint_collect_result::not_collecting;
2013
2014 /* Get the thread area address. This is used to recognize which
2015 thread is which when tracing with the in-process agent library.
2016 We don't read anything from the address, and treat it as opaque;
2017 it's the address itself that we assume is unique per-thread. */
2018 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2019 return fast_tpoint_collect_result::not_collecting;
2020
2021 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2022 }
2023
2024 bool
2025 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2026 {
2027 struct thread_info *saved_thread;
2028
2029 saved_thread = current_thread;
2030 current_thread = get_lwp_thread (lwp);
2031
2032 if ((wstat == NULL
2033 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2034 && supports_fast_tracepoints ()
2035 && agent_loaded_p ())
2036 {
2037 struct fast_tpoint_collect_status status;
2038
2039 if (debug_threads)
2040 debug_printf ("Checking whether LWP %ld needs to move out of the "
2041 "jump pad.\n",
2042 lwpid_of (current_thread));
2043
2044 fast_tpoint_collect_result r
2045 = linux_fast_tracepoint_collecting (lwp, &status);
2046
2047 if (wstat == NULL
2048 || (WSTOPSIG (*wstat) != SIGILL
2049 && WSTOPSIG (*wstat) != SIGFPE
2050 && WSTOPSIG (*wstat) != SIGSEGV
2051 && WSTOPSIG (*wstat) != SIGBUS))
2052 {
2053 lwp->collecting_fast_tracepoint = r;
2054
2055 if (r != fast_tpoint_collect_result::not_collecting)
2056 {
2057 if (r == fast_tpoint_collect_result::before_insn
2058 && lwp->exit_jump_pad_bkpt == NULL)
2059 {
2060 /* Haven't executed the original instruction yet.
2061 Set breakpoint there, and wait till it's hit,
2062 then single-step until exiting the jump pad. */
2063 lwp->exit_jump_pad_bkpt
2064 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2065 }
2066
2067 if (debug_threads)
2068 debug_printf ("Checking whether LWP %ld needs to move out of "
2069 "the jump pad...it does\n",
2070 lwpid_of (current_thread));
2071 current_thread = saved_thread;
2072
2073 return true;
2074 }
2075 }
2076 else
2077 {
2078 /* If we get a synchronous signal while collecting, *and*
2079 while executing the (relocated) original instruction,
2080 reset the PC to point at the tpoint address, before
2081 reporting to GDB. Otherwise, it's an IPA lib bug: just
2082 report the signal to GDB, and pray for the best. */
2083
2084 lwp->collecting_fast_tracepoint
2085 = fast_tpoint_collect_result::not_collecting;
2086
2087 if (r != fast_tpoint_collect_result::not_collecting
2088 && (status.adjusted_insn_addr <= lwp->stop_pc
2089 && lwp->stop_pc < status.adjusted_insn_addr_end))
2090 {
2091 siginfo_t info;
2092 struct regcache *regcache;
2093
2094 /* The si_addr on a few signals references the address
2095 of the faulting instruction. Adjust that as
2096 well. */
2097 if ((WSTOPSIG (*wstat) == SIGILL
2098 || WSTOPSIG (*wstat) == SIGFPE
2099 || WSTOPSIG (*wstat) == SIGBUS
2100 || WSTOPSIG (*wstat) == SIGSEGV)
2101 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2102 (PTRACE_TYPE_ARG3) 0, &info) == 0
2103 /* Final check just to make sure we don't clobber
2104 the siginfo of non-kernel-sent signals. */
2105 && (uintptr_t) info.si_addr == lwp->stop_pc)
2106 {
2107 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2108 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2109 (PTRACE_TYPE_ARG3) 0, &info);
2110 }
2111
2112 regcache = get_thread_regcache (current_thread, 1);
2113 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2114 lwp->stop_pc = status.tpoint_addr;
2115
2116 /* Cancel any fast tracepoint lock this thread was
2117 holding. */
2118 force_unlock_trace_buffer ();
2119 }
2120
2121 if (lwp->exit_jump_pad_bkpt != NULL)
2122 {
2123 if (debug_threads)
2124 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2125 "stopping all threads momentarily.\n");
2126
2127 stop_all_lwps (1, lwp);
2128
2129 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2130 lwp->exit_jump_pad_bkpt = NULL;
2131
2132 unstop_all_lwps (1, lwp);
2133
2134 gdb_assert (lwp->suspended >= 0);
2135 }
2136 }
2137 }
2138
2139 if (debug_threads)
2140 debug_printf ("Checking whether LWP %ld needs to move out of the "
2141 "jump pad...no\n",
2142 lwpid_of (current_thread));
2143
2144 current_thread = saved_thread;
2145 return false;
2146 }
2147
2148 /* Enqueue one signal in the "signals to report later when out of the
2149 jump pad" list. */
2150
2151 static void
2152 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2153 {
2154 struct pending_signals *p_sig;
2155 struct thread_info *thread = get_lwp_thread (lwp);
2156
2157 if (debug_threads)
2158 debug_printf ("Deferring signal %d for LWP %ld.\n",
2159 WSTOPSIG (*wstat), lwpid_of (thread));
2160
2161 if (debug_threads)
2162 {
2163 struct pending_signals *sig;
2164
2165 for (sig = lwp->pending_signals_to_report;
2166 sig != NULL;
2167 sig = sig->prev)
2168 debug_printf (" Already queued %d\n",
2169 sig->signal);
2170
2171 debug_printf (" (no more currently queued signals)\n");
2172 }
2173
2174 /* Don't enqueue non-RT signals if they are already in the deferred
2175 queue. (SIGSTOP being the easiest signal to see ending up here
2176 twice) */
2177 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2178 {
2179 struct pending_signals *sig;
2180
2181 for (sig = lwp->pending_signals_to_report;
2182 sig != NULL;
2183 sig = sig->prev)
2184 {
2185 if (sig->signal == WSTOPSIG (*wstat))
2186 {
2187 if (debug_threads)
2188 debug_printf ("Not requeuing already queued non-RT signal %d"
2189 " for LWP %ld\n",
2190 sig->signal,
2191 lwpid_of (thread));
2192 return;
2193 }
2194 }
2195 }
2196
2197 p_sig = XCNEW (struct pending_signals);
2198 p_sig->prev = lwp->pending_signals_to_report;
2199 p_sig->signal = WSTOPSIG (*wstat);
2200
2201 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2202 &p_sig->info);
2203
2204 lwp->pending_signals_to_report = p_sig;
2205 }
2206
2207 /* Dequeue one signal from the "signals to report later when out of
2208 the jump pad" list. */
2209
2210 static int
2211 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2212 {
2213 struct thread_info *thread = get_lwp_thread (lwp);
2214
2215 if (lwp->pending_signals_to_report != NULL)
2216 {
2217 struct pending_signals **p_sig;
2218
2219 p_sig = &lwp->pending_signals_to_report;
2220 while ((*p_sig)->prev != NULL)
2221 p_sig = &(*p_sig)->prev;
2222
2223 *wstat = W_STOPCODE ((*p_sig)->signal);
2224 if ((*p_sig)->info.si_signo != 0)
2225 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2226 &(*p_sig)->info);
2227 free (*p_sig);
2228 *p_sig = NULL;
2229
2230 if (debug_threads)
2231 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2232 WSTOPSIG (*wstat), lwpid_of (thread));
2233
2234 if (debug_threads)
2235 {
2236 struct pending_signals *sig;
2237
2238 for (sig = lwp->pending_signals_to_report;
2239 sig != NULL;
2240 sig = sig->prev)
2241 debug_printf (" Still queued %d\n",
2242 sig->signal);
2243
2244 debug_printf (" (no more queued signals)\n");
2245 }
2246
2247 return 1;
2248 }
2249
2250 return 0;
2251 }
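
/* Illustrative note on the pair above: enqueue pushes new entries at
   the head of the singly linked list, while dequeue walks to the tail
   and unlinks that element, so deferred signals are reported in FIFO
   order. W_STOPCODE (SIG) rebuilds a wait status that
   WIFSTOPPED/WSTOPSIG decode as a stop by SIG (glibc defines it as
   ((sig) << 8 | 0x7f)). A minimal sketch of the list discipline,
   reusing struct pending_signals (assumes LIST is non-NULL before the
   pop):

     // Push: the newest entry becomes the head.
     struct pending_signals *node = XCNEW (struct pending_signals);
     node->signal = sig;
     node->prev = list;
     list = node;

     // Pop the oldest entry: walk to the tail and unlink it.
     struct pending_signals **slot = &list;
     while ((*slot)->prev != NULL)
       slot = &(*slot)->prev;
     int oldest = (*slot)->signal;
     free (*slot);
     *slot = NULL;
*/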
2252
2253 /* Fetch the possibly triggered data watchpoint info and store it in
2254 CHILD.
2255
2256 On some archs, like x86, that use debug registers to set
2257 watchpoints, it's possible that the way to know which watched
2258 address trapped is to check the register that is used to select
2259 which address to watch. Problem is, between setting the watchpoint
2260 and reading back which data address trapped, the user may change
2261 the set of watchpoints, and, as a consequence, GDB changes the
2262 debug registers in the inferior. To avoid reading back a stale
2263 stopped-data-address when that happens, we cache in CHILD the fact
2264 that a watchpoint trapped, and the corresponding data address, as
2265 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2266 registers meanwhile, we have the cached data we can rely on. */
2267
2268 static int
2269 check_stopped_by_watchpoint (struct lwp_info *child)
2270 {
2271 if (the_low_target.stopped_by_watchpoint != NULL)
2272 {
2273 struct thread_info *saved_thread;
2274
2275 saved_thread = current_thread;
2276 current_thread = get_lwp_thread (child);
2277
2278 if (the_low_target.stopped_by_watchpoint ())
2279 {
2280 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2281
2282 if (the_low_target.stopped_data_address != NULL)
2283 child->stopped_data_address
2284 = the_low_target.stopped_data_address ();
2285 else
2286 child->stopped_data_address = 0;
2287 }
2288
2289 current_thread = saved_thread;
2290 }
2291
2292 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2293 }
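
/* Illustrative sketch, x86-specific and only an assumption about that
   low target, not the code every architecture uses: on x86 the
   stopped_by_watchpoint/stopped_data_address hooks boil down to
   reading the DR6 debug status register via ptrace:

     #include <stddef.h>    // offsetof
     #include <sys/user.h>  // struct user (x86)

     static long
     read_dr6_sketch (int tid)
     {
       errno = 0;
       long dr6 = ptrace (PTRACE_PEEKUSER, tid,
                          offsetof (struct user, u_debugreg[6]), 0);
       // Bits 0..3 say which of DR0..DR3 triggered; -1 with errno
       // set means the peek itself failed.
       return (dr6 == -1 && errno != 0) ? 0 : dr6;
     }
*/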
2294
2295 /* Return the ptrace options that we want to try to enable. */
2296
2297 static int
2298 linux_low_ptrace_options (int attached)
2299 {
2300 client_state &cs = get_client_state ();
2301 int options = 0;
2302
2303 if (!attached)
2304 options |= PTRACE_O_EXITKILL;
2305
2306 if (cs.report_fork_events)
2307 options |= PTRACE_O_TRACEFORK;
2308
2309 if (cs.report_vfork_events)
2310 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2311
2312 if (cs.report_exec_events)
2313 options |= PTRACE_O_TRACEEXEC;
2314
2315 options |= PTRACE_O_TRACESYSGOOD;
2316
2317 return options;
2318 }
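
/* Illustrative note: the mask computed above is handed to the kernel
   with PTRACE_SETOPTIONS, once per LWP -- see the
   must_set_ptrace_flags handling in filter_event below and
   linux_enable_event_reporting in nat/linux-ptrace.c -- essentially:

     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) options);
*/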
2319
2320 lwp_info *
2321 linux_process_target::filter_event (int lwpid, int wstat)
2322 {
2323 client_state &cs = get_client_state ();
2324 struct lwp_info *child;
2325 struct thread_info *thread;
2326 int have_stop_pc = 0;
2327
2328 child = find_lwp_pid (ptid_t (lwpid));
2329
2330 /* Check for stop events reported by a process we didn't already
2331 know about - anything not already in our LWP list.
2332
2333 If we're expecting to receive stopped processes after
2334 fork, vfork, and clone events, then we'll just add the
2335 new one to our list and go back to waiting for the event
2336 to be reported - the stopped process might be returned
2337 from waitpid before or after the event is.
2338
2339 But note the case of a non-leader thread exec'ing after the
2340 leader having exited, and gone from our lists (because
2341 check_zombie_leaders deleted it). The non-leader thread
2342 changes its tid to the tgid. */
2343
2344 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2345 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2346 {
2347 ptid_t child_ptid;
2348
2349 /* A multi-thread exec after we had seen the leader exiting. */
2350 if (debug_threads)
2351 {
2352 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2353 "after exec.\n", lwpid);
2354 }
2355
2356 child_ptid = ptid_t (lwpid, lwpid, 0);
2357 child = add_lwp (child_ptid);
2358 child->stopped = 1;
2359 current_thread = child->thread;
2360 }
2361
2362 /* If we didn't find a process, one of two things presumably happened:
2363 - A process we started and then detached from has exited. Ignore it.
2364 - A process we are controlling has forked and the new child's stop
2365 was reported to us by the kernel. Save its PID. */
2366 if (child == NULL && WIFSTOPPED (wstat))
2367 {
2368 add_to_pid_list (&stopped_pids, lwpid, wstat);
2369 return NULL;
2370 }
2371 else if (child == NULL)
2372 return NULL;
2373
2374 thread = get_lwp_thread (child);
2375
2376 child->stopped = 1;
2377
2378 child->last_status = wstat;
2379
2380 /* Check if the thread has exited. */
2381 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2382 {
2383 if (debug_threads)
2384 debug_printf ("LLFE: %d exited.\n", lwpid);
2385
2386 if (finish_step_over (child))
2387 {
2388 /* Unsuspend all other LWPs, and set them back running again. */
2389 unsuspend_all_lwps (child);
2390 }
2391
2392 /* If there is at least one more LWP, then the exit signal was
2393 not the end of the debugged application and should be
2394 ignored, unless GDB wants to hear about thread exits. */
2395 if (cs.report_thread_events
2396 || last_thread_of_process_p (pid_of (thread)))
2397 {
2398 /* Since events are serialized to the GDB core, we can't
2399 report this one right now. Leave the status pending for
2400 the next time we're able to report it. */
2401 mark_lwp_dead (child, wstat);
2402 return child;
2403 }
2404 else
2405 {
2406 delete_lwp (child);
2407 return NULL;
2408 }
2409 }
2410
2411 gdb_assert (WIFSTOPPED (wstat));
2412
2413 if (WIFSTOPPED (wstat))
2414 {
2415 struct process_info *proc;
2416
2417 /* Architecture-specific setup after inferior is running. */
2418 proc = find_process_pid (pid_of (thread));
2419 if (proc->tdesc == NULL)
2420 {
2421 if (proc->attached)
2422 {
2423 /* This needs to happen after we have attached to the
2424 inferior and it is stopped for the first time, but
2425 before we access any inferior registers. */
2426 arch_setup_thread (thread);
2427 }
2428 else
2429 {
2430 /* The process is started, but GDBserver will do
2431 architecture-specific setup after the program stops at
2432 the first instruction. */
2433 child->status_pending_p = 1;
2434 child->status_pending = wstat;
2435 return child;
2436 }
2437 }
2438 }
2439
2440 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2441 {
2442 struct process_info *proc = find_process_pid (pid_of (thread));
2443 int options = linux_low_ptrace_options (proc->attached);
2444
2445 linux_enable_event_reporting (lwpid, options);
2446 child->must_set_ptrace_flags = 0;
2447 }
2448
2449 /* Always update syscall_state, even if it will be filtered later. */
2450 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2451 {
2452 child->syscall_state
2453 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2454 ? TARGET_WAITKIND_SYSCALL_RETURN
2455 : TARGET_WAITKIND_SYSCALL_ENTRY);
2456 }
2457 else
2458 {
2459 /* Almost all other ptrace-stops are known to be outside of system
2460 calls, with further exceptions in handle_extended_wait. */
2461 child->syscall_state = TARGET_WAITKIND_IGNORE;
2462 }
2463
2464 /* Be careful to not overwrite stop_pc until save_stop_reason is
2465 called. */
2466 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2467 && linux_is_extended_waitstatus (wstat))
2468 {
2469 child->stop_pc = get_pc (child);
2470 if (handle_extended_wait (&child, wstat))
2471 {
2472 /* The event has been handled, so just return without
2473 reporting it. */
2474 return NULL;
2475 }
2476 }
2477
2478 if (linux_wstatus_maybe_breakpoint (wstat))
2479 {
2480 if (save_stop_reason (child))
2481 have_stop_pc = 1;
2482 }
2483
2484 if (!have_stop_pc)
2485 child->stop_pc = get_pc (child);
2486
2487 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2488 && child->stop_expected)
2489 {
2490 if (debug_threads)
2491 debug_printf ("Expected stop.\n");
2492 child->stop_expected = 0;
2493
2494 if (thread->last_resume_kind == resume_stop)
2495 {
2496 /* We want to report the stop to the core. Treat the
2497 SIGSTOP as a normal event. */
2498 if (debug_threads)
2499 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2500 target_pid_to_str (ptid_of (thread)));
2501 }
2502 else if (stopping_threads != NOT_STOPPING_THREADS)
2503 {
2504 /* Stopping threads. We don't want this SIGSTOP to end up
2505 pending. */
2506 if (debug_threads)
2507 debug_printf ("LLW: SIGSTOP caught for %s "
2508 "while stopping threads.\n",
2509 target_pid_to_str (ptid_of (thread)));
2510 return NULL;
2511 }
2512 else
2513 {
2514 /* This is a delayed SIGSTOP. Filter out the event. */
2515 if (debug_threads)
2516 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2517 child->stepping ? "step" : "continue",
2518 target_pid_to_str (ptid_of (thread)));
2519
2520 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2521 return NULL;
2522 }
2523 }
2524
2525 child->status_pending_p = 1;
2526 child->status_pending = wstat;
2527 return child;
2528 }
2529
2530 /* Return 1 if THREAD is doing hardware single step, 0 otherwise. */
2531
2532 static int
2533 maybe_hw_step (struct thread_info *thread)
2534 {
2535 if (can_hardware_single_step ())
2536 return 1;
2537 else
2538 {
2539 /* GDBserver must insert single-step breakpoint for software
2540 single step. */
2541 gdb_assert (has_single_step_breakpoints (thread));
2542 return 0;
2543 }
2544 }
2545
2546 /* Resume LWPs that are currently stopped without any pending status
2547 to report, but are resumed from the core's perspective. */
2548
2549 static void
2550 resume_stopped_resumed_lwps (thread_info *thread)
2551 {
2552 struct lwp_info *lp = get_thread_lwp (thread);
2553
2554 if (lp->stopped
2555 && !lp->suspended
2556 && !lp->status_pending_p
2557 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2558 {
2559 int step = 0;
2560
2561 if (thread->last_resume_kind == resume_step)
2562 step = maybe_hw_step (thread);
2563
2564 if (debug_threads)
2565 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2566 target_pid_to_str (ptid_of (thread)),
2567 paddress (lp->stop_pc),
2568 step);
2569
2570 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2571 }
2572 }
2573
2574 int
2575 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2576 ptid_t filter_ptid,
2577 int *wstatp, int options)
2578 {
2579 struct thread_info *event_thread;
2580 struct lwp_info *event_child, *requested_child;
2581 sigset_t block_mask, prev_mask;
2582
2583 retry:
2584 /* N.B. event_thread points to the thread_info struct that contains
2585 event_child. Keep them in sync. */
2586 event_thread = NULL;
2587 event_child = NULL;
2588 requested_child = NULL;
2589
2590 /* Check for a lwp with a pending status. */
2591
2592 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2593 {
2594 event_thread = find_thread_in_random ([&] (thread_info *thread)
2595 {
2596 return status_pending_p_callback (thread, filter_ptid);
2597 });
2598
2599 if (event_thread != NULL)
2600 event_child = get_thread_lwp (event_thread);
2601 if (debug_threads && event_thread)
2602 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2603 }
2604 else if (filter_ptid != null_ptid)
2605 {
2606 requested_child = find_lwp_pid (filter_ptid);
2607
2608 if (stopping_threads == NOT_STOPPING_THREADS
2609 && requested_child->status_pending_p
2610 && (requested_child->collecting_fast_tracepoint
2611 != fast_tpoint_collect_result::not_collecting))
2612 {
2613 enqueue_one_deferred_signal (requested_child,
2614 &requested_child->status_pending);
2615 requested_child->status_pending_p = 0;
2616 requested_child->status_pending = 0;
2617 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2618 }
2619
2620 if (requested_child->suspended
2621 && requested_child->status_pending_p)
2622 {
2623 internal_error (__FILE__, __LINE__,
2624 "requesting an event out of a"
2625 " suspended child?");
2626 }
2627
2628 if (requested_child->status_pending_p)
2629 {
2630 event_child = requested_child;
2631 event_thread = get_lwp_thread (event_child);
2632 }
2633 }
2634
2635 if (event_child != NULL)
2636 {
2637 if (debug_threads)
2638 debug_printf ("Got an event from pending child %ld (%04x)\n",
2639 lwpid_of (event_thread), event_child->status_pending);
2640 *wstatp = event_child->status_pending;
2641 event_child->status_pending_p = 0;
2642 event_child->status_pending = 0;
2643 current_thread = event_thread;
2644 return lwpid_of (event_thread);
2645 }
2646
2647 /* But if we don't find a pending event, we'll have to wait.
2648
2649 We only enter this loop if no process has a pending wait status.
2650 Thus any action taken in response to a wait status inside this
2651 loop is responding as soon as we detect the status, not after any
2652 pending events. */
2653
2654 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2655 all signals while here. */
2656 sigfillset (&block_mask);
2657 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2658
2659 /* Always pull all events out of the kernel. We'll randomly select
2660 an event LWP out of all that have events, to prevent
2661 starvation. */
2662 while (event_child == NULL)
2663 {
2664 pid_t ret = 0;
2665
2666 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2667 quirks:
2668
2669 - If the thread group leader exits while other threads in the
2670 thread group still exist, waitpid(TGID, ...) hangs. That
2671 waitpid won't return an exit status until the other threads
2672 in the group are reaped.
2673
2674 - When a non-leader thread execs, that thread just vanishes
2675 without reporting an exit (so we'd hang if we waited for it
2676 explicitly in that case). The exec event is reported to
2677 the TGID pid. */
2678 errno = 0;
2679 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2680
2681 if (debug_threads)
2682 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2683 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2684
2685 if (ret > 0)
2686 {
2687 if (debug_threads)
2688 {
2689 debug_printf ("LLW: waitpid %ld received %s\n",
2690 (long) ret, status_to_str (*wstatp));
2691 }
2692
2693 /* Filter all events. IOW, leave all events pending. We'll
2694 randomly select an event LWP out of all that have events
2695 below. */
2696 filter_event (ret, *wstatp);
2697 /* Retry until nothing comes out of waitpid. A single
2698 SIGCHLD can indicate more than one child stopped. */
2699 continue;
2700 }
2701
2702 /* Now that we've pulled all events out of the kernel, resume
2703 LWPs that don't have an interesting event to report. */
2704 if (stopping_threads == NOT_STOPPING_THREADS)
2705 for_each_thread (resume_stopped_resumed_lwps);
2706
2707 /* ... and find an LWP with a status to report to the core, if
2708 any. */
2709 event_thread = find_thread_in_random ([&] (thread_info *thread)
2710 {
2711 return status_pending_p_callback (thread, filter_ptid);
2712 });
2713
2714 if (event_thread != NULL)
2715 {
2716 event_child = get_thread_lwp (event_thread);
2717 *wstatp = event_child->status_pending;
2718 event_child->status_pending_p = 0;
2719 event_child->status_pending = 0;
2720 break;
2721 }
2722
2723 /* Check for zombie thread group leaders. Those can't be reaped
2724 until all other threads in the thread group are. */
2725 check_zombie_leaders ();
2726
2727 auto not_stopped = [&] (thread_info *thread)
2728 {
2729 return not_stopped_callback (thread, wait_ptid);
2730 };
2731
2732 /* If there are no resumed children left in the set of LWPs we
2733 want to wait for, bail. We can't just block in
2734 waitpid/sigsuspend, because lwps might have been left stopped
2735 in trace-stop state, and we'd be stuck forever waiting for
2736 their status to change (which would only happen if we resumed
2737 them). Even if WNOHANG is set, this return code is preferred
2738 over 0 (below), as it is more detailed. */
2739 if (find_thread (not_stopped) == NULL)
2740 {
2741 if (debug_threads)
2742 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2743 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2744 return -1;
2745 }
2746
2747 /* No interesting event to report to the caller. */
2748 if ((options & WNOHANG))
2749 {
2750 if (debug_threads)
2751 debug_printf ("WNOHANG set, no event found\n");
2752
2753 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2754 return 0;
2755 }
2756
2757 /* Block until we get an event reported with SIGCHLD. */
2758 if (debug_threads)
2759 debug_printf ("sigsuspend'ing\n");
2760
2761 sigsuspend (&prev_mask);
2762 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2763 goto retry;
2764 }
2765
2766 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2767
2768 current_thread = event_thread;
2769
2770 return lwpid_of (event_thread);
2771 }
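
/* Illustrative note: the block/sigsuspend dance in
   wait_for_event_filtered is the classic race-free child wait.
   SIGCHLD is blocked before the pending-status checks, so a child
   that changes state between the waitpid and the sigsuspend merely
   leaves SIGCHLD pending, and sigsuspend returns immediately instead
   of sleeping forever. The bare pattern (a sketch; the real code uses
   gdb_sigmask/my_waitpid, and error handling such as ECHILD is
   omitted):

     sigset_t block, prev;
     int status;

     sigfillset (&block);
     sigprocmask (SIG_BLOCK, &block, &prev);
     while (waitpid (-1, &status, WNOHANG) <= 0)
       sigsuspend (&prev);  // Atomically unblock and wait.
     sigprocmask (SIG_SETMASK, &prev, NULL);
*/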
2772
2773 int
2774 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2775 {
2776 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2777 }
2778
2779 /* Select one LWP out of those that have events pending. */
2780
2781 static void
2782 select_event_lwp (struct lwp_info **orig_lp)
2783 {
2784 struct thread_info *event_thread = NULL;
2785
2786 /* In all-stop, give preference to the LWP that is being
2787 single-stepped. There will be at most one, and it's the LWP that
2788 the core is most interested in. If we didn't do this, then we'd
2789 have to handle pending step SIGTRAPs somehow in case the core
2790 later continues the previously-stepped thread, otherwise we'd
2791 report the pending SIGTRAP, and the core, not having stepped the
2792 thread, wouldn't understand what the trap was for, and therefore
2793 would report it to the user as a random signal. */
2794 if (!non_stop)
2795 {
2796 event_thread = find_thread ([] (thread_info *thread)
2797 {
2798 lwp_info *lp = get_thread_lwp (thread);
2799
2800 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2801 && thread->last_resume_kind == resume_step
2802 && lp->status_pending_p);
2803 });
2804
2805 if (event_thread != NULL)
2806 {
2807 if (debug_threads)
2808 debug_printf ("SEL: Select single-step %s\n",
2809 target_pid_to_str (ptid_of (event_thread)));
2810 }
2811 }
2812 if (event_thread == NULL)
2813 {
2814 /* No single-stepping LWP. Select one at random, out of those
2815 which have had events. */
2816
2817 event_thread = find_thread_in_random ([&] (thread_info *thread)
2818 {
2819 lwp_info *lp = get_thread_lwp (thread);
2820
2821 /* Only resumed LWPs that have an event pending. */
2822 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2823 && lp->status_pending_p);
2824 });
2825 }
2826
2827 if (event_thread != NULL)
2828 {
2829 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2830
2831 /* Switch the event LWP. */
2832 *orig_lp = event_lp;
2833 }
2834 }
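
/* Illustrative note: find_thread_in_random avoids starvation by
   making two passes -- count the matching threads, then pick one
   uniformly at random. Conceptually (a sketch of the helper in
   gdbserver's gdbthread.h; PRED stands for the caller's predicate):

     int count = 0;
     for_each_thread ([&] (thread_info *t) { if (pred (t)) count++; });
     if (count == 0)
       return NULL;
     int k = (int) ((count * (double) rand ()) / (RAND_MAX + 1.0));
     return find_thread ([&] (thread_info *t)
       { return pred (t) && k-- == 0; });
*/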
2835
2836 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2837 non-NULL. */
2838
2839 static void
2840 unsuspend_all_lwps (struct lwp_info *except)
2841 {
2842 for_each_thread ([&] (thread_info *thread)
2843 {
2844 lwp_info *lwp = get_thread_lwp (thread);
2845
2846 if (lwp != except)
2847 lwp_suspended_decr (lwp);
2848 });
2849 }
2850
2851 static bool stuck_in_jump_pad_callback (thread_info *thread);
2852 static bool lwp_running (thread_info *thread);
2853
2854 /* Stabilize threads (move out of jump pads).
2855
2856 If a thread is midway collecting a fast tracepoint, we need to
2857 finish the collection and move it out of the jump pad before
2858 reporting the signal.
2859
2860 This avoids recursion while collecting (when a signal arrives
2861 midway, and the signal handler itself collects), which would trash
2862 the trace buffer. In case the user set a breakpoint in a signal
2863 handler, this avoids the backtrace showing the jump pad, etc..
2864 Most importantly, there are certain things we can't do safely if
2865 threads are stopped in a jump pad (or in its callee's). For
2866 example:
2867
2868 - starting a new trace run. A thread still collecting the
2869 previous run, could trash the trace buffer when resumed. The trace
2870 buffer control structures would have been reset but the thread had
2871 no way to tell. The thread could even midway memcpy'ing to the
2872 buffer, which would mean that when resumed, it would clobber the
2873 trace buffer that had been set for a new run.
2874
2875 - we can't rewrite/reuse the jump pads for new tracepoints
2876 safely. Say you do tstart while a thread is stopped midway while
2877 collecting. When the thread is later resumed, it finishes the
2878 collection, and returns to the jump pad, to execute the original
2879 instruction that was under the tracepoint jump at the time the
2880 older run had been started. If the jump pad had been rewritten
2881 since for something else in the new run, the thread would now
2882 execute the wrong / random instructions. */
2883
2884 void
2885 linux_process_target::stabilize_threads ()
2886 {
2887 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2888
2889 if (thread_stuck != NULL)
2890 {
2891 if (debug_threads)
2892 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2893 lwpid_of (thread_stuck));
2894 return;
2895 }
2896
2897 thread_info *saved_thread = current_thread;
2898
2899 stabilizing_threads = 1;
2900
2901 /* Kick 'em all. */
2902 for_each_thread ([this] (thread_info *thread)
2903 {
2904 move_out_of_jump_pad (thread);
2905 });
2906
2907 /* Loop until all are stopped out of the jump pads. */
2908 while (find_thread (lwp_running) != NULL)
2909 {
2910 struct target_waitstatus ourstatus;
2911 struct lwp_info *lwp;
2912 int wstat;
2913
2914 /* Note that we go through the full wait event loop. While
2915 moving threads out of the jump pad, we need to be able to step
2916 over internal breakpoints and such. */
2917 wait_1 (minus_one_ptid, &ourstatus, 0);
2918
2919 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2920 {
2921 lwp = get_thread_lwp (current_thread);
2922
2923 /* Lock it. */
2924 lwp_suspended_inc (lwp);
2925
2926 if (ourstatus.value.sig != GDB_SIGNAL_0
2927 || current_thread->last_resume_kind == resume_stop)
2928 {
2929 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2930 enqueue_one_deferred_signal (lwp, &wstat);
2931 }
2932 }
2933 }
2934
2935 unsuspend_all_lwps (NULL);
2936
2937 stabilizing_threads = 0;
2938
2939 current_thread = saved_thread;
2940
2941 if (debug_threads)
2942 {
2943 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2944
2945 if (thread_stuck != NULL)
2946 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2947 lwpid_of (thread_stuck));
2948 }
2949 }
2950
2951 /* Convenience function that is called when the kernel reports an
2952 event that is not passed out to GDB. */
2953
2954 static ptid_t
2955 ignore_event (struct target_waitstatus *ourstatus)
2956 {
2957 /* If we got an event, there may still be others, as a single
2958 SIGCHLD can indicate more than one child stopped. This forces
2959 another target_wait call. */
2960 async_file_mark ();
2961
2962 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2963 return null_ptid;
2964 }
2965
2966 /* Convenience function that is called when the kernel reports an exit
2967 event. This decides whether to report the event to GDB as a
2968 process exit event, a thread exit event, or to suppress the
2969 event. */
2970
2971 static ptid_t
2972 filter_exit_event (struct lwp_info *event_child,
2973 struct target_waitstatus *ourstatus)
2974 {
2975 client_state &cs = get_client_state ();
2976 struct thread_info *thread = get_lwp_thread (event_child);
2977 ptid_t ptid = ptid_of (thread);
2978
2979 if (!last_thread_of_process_p (pid_of (thread)))
2980 {
2981 if (cs.report_thread_events)
2982 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2983 else
2984 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2985
2986 delete_lwp (event_child);
2987 }
2988 return ptid;
2989 }
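
/* Protocol-level note, stated as an assumption about the RSP mapping:
   TARGET_WAITKIND_THREAD_EXITED is what eventually becomes a `w'
   (thread exit) stop reply, which GDB only asks for via the
   QThreadEvents packet -- hence the cs.report_thread_events check
   above. Without it, a non-last thread's exit is swallowed as
   TARGET_WAITKIND_IGNORE. */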
2990
2991 /* Returns 1 if GDB is interested in any event_child syscalls. */
2992
2993 static int
2994 gdb_catching_syscalls_p (struct lwp_info *event_child)
2995 {
2996 struct thread_info *thread = get_lwp_thread (event_child);
2997 struct process_info *proc = get_thread_process (thread);
2998
2999 return !proc->syscalls_to_catch.empty ();
3000 }
3001
3002 /* Returns 1 if GDB is interested in the event_child syscall.
3003 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3004
3005 static int
3006 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3007 {
3008 int sysno;
3009 struct thread_info *thread = get_lwp_thread (event_child);
3010 struct process_info *proc = get_thread_process (thread);
3011
3012 if (proc->syscalls_to_catch.empty ())
3013 return 0;
3014
3015 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3016 return 1;
3017
3018 get_syscall_trapinfo (event_child, &sysno);
3019
3020 for (int iter : proc->syscalls_to_catch)
3021 if (iter == sysno)
3022 return 1;
3023
3024 return 0;
3025 }
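
/* Usage note, assuming the QCatchSyscalls packet syntax from the RSP
   docs: syscalls_to_catch is filled from that packet, so a plain
   "QCatchSyscalls:1" stores the single entry ANY_SYSCALL (catch
   everything), while e.g. "QCatchSyscalls:1;0;e" stores syscall
   numbers 0 and 0xe, which the loop above compares against the
   trapped syscall number. */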
3026
3027 ptid_t
3028 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3029 int target_options)
3030 {
3031 client_state &cs = get_client_state ();
3032 int w;
3033 struct lwp_info *event_child;
3034 int options;
3035 int pid;
3036 int step_over_finished;
3037 int bp_explains_trap;
3038 int maybe_internal_trap;
3039 int report_to_gdb;
3040 int trace_event;
3041 int in_step_range;
3042 int any_resumed;
3043
3044 if (debug_threads)
3045 {
3046 debug_enter ();
3047 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3048 }
3049
3050 /* Translate generic target options into linux options. */
3051 options = __WALL;
3052 if (target_options & TARGET_WNOHANG)
3053 options |= WNOHANG;
3054
3055 bp_explains_trap = 0;
3056 trace_event = 0;
3057 in_step_range = 0;
3058 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3059
3060 auto status_pending_p_any = [&] (thread_info *thread)
3061 {
3062 return status_pending_p_callback (thread, minus_one_ptid);
3063 };
3064
3065 auto not_stopped = [&] (thread_info *thread)
3066 {
3067 return not_stopped_callback (thread, minus_one_ptid);
3068 };
3069
3070 /* Find a resumed LWP, if any. */
3071 if (find_thread (status_pending_p_any) != NULL)
3072 any_resumed = 1;
3073 else if (find_thread (not_stopped) != NULL)
3074 any_resumed = 1;
3075 else
3076 any_resumed = 0;
3077
3078 if (step_over_bkpt == null_ptid)
3079 pid = wait_for_event (ptid, &w, options);
3080 else
3081 {
3082 if (debug_threads)
3083 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3084 target_pid_to_str (step_over_bkpt));
3085 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3086 }
3087
3088 if (pid == 0 || (pid == -1 && !any_resumed))
3089 {
3090 gdb_assert (target_options & TARGET_WNOHANG);
3091
3092 if (debug_threads)
3093 {
3094 debug_printf ("wait_1 ret = null_ptid, "
3095 "TARGET_WAITKIND_IGNORE\n");
3096 debug_exit ();
3097 }
3098
3099 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3100 return null_ptid;
3101 }
3102 else if (pid == -1)
3103 {
3104 if (debug_threads)
3105 {
3106 debug_printf ("wait_1 ret = null_ptid, "
3107 "TARGET_WAITKIND_NO_RESUMED\n");
3108 debug_exit ();
3109 }
3110
3111 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3112 return null_ptid;
3113 }
3114
3115 event_child = get_thread_lwp (current_thread);
3116
3117 /* wait_for_event only returns an exit status for the last
3118 child of a process. Report it. */
3119 if (WIFEXITED (w) || WIFSIGNALED (w))
3120 {
3121 if (WIFEXITED (w))
3122 {
3123 ourstatus->kind = TARGET_WAITKIND_EXITED;
3124 ourstatus->value.integer = WEXITSTATUS (w);
3125
3126 if (debug_threads)
3127 {
3128 debug_printf ("wait_1 ret = %s, exited with "
3129 "retcode %d\n",
3130 target_pid_to_str (ptid_of (current_thread)),
3131 WEXITSTATUS (w));
3132 debug_exit ();
3133 }
3134 }
3135 else
3136 {
3137 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3138 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3139
3140 if (debug_threads)
3141 {
3142 debug_printf ("wait_1 ret = %s, terminated with "
3143 "signal %d\n",
3144 target_pid_to_str (ptid_of (current_thread)),
3145 WTERMSIG (w));
3146 debug_exit ();
3147 }
3148 }
3149
3150 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3151 return filter_exit_event (event_child, ourstatus);
3152
3153 return ptid_of (current_thread);
3154 }
3155
3156 /* If step-over executes a breakpoint instruction, then in the case of a
3157 hardware single step it means a gdb/gdbserver breakpoint had been
3158 planted on top of a permanent breakpoint; in the case of a software
3159 single step it may just mean that gdbserver hit the reinsert breakpoint.
3160 The PC has been adjusted by save_stop_reason to point at
3161 the breakpoint address.
3162 So, in the case of hardware single step, advance the PC manually
3163 past the breakpoint; in the case of software single step, advance it only
3164 if it's not the single_step_breakpoint we are hitting.
3165 This avoids the program trapping on a permanent breakpoint
3166 forever. */
3167 if (step_over_bkpt != null_ptid
3168 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3169 && (event_child->stepping
3170 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3171 {
3172 int increment_pc = 0;
3173 int breakpoint_kind = 0;
3174 CORE_ADDR stop_pc = event_child->stop_pc;
3175
3176 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3177 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3178
3179 if (debug_threads)
3180 {
3181 debug_printf ("step-over for %s executed software breakpoint\n",
3182 target_pid_to_str (ptid_of (current_thread)));
3183 }
3184
3185 if (increment_pc != 0)
3186 {
3187 struct regcache *regcache
3188 = get_thread_regcache (current_thread, 1);
3189
3190 event_child->stop_pc += increment_pc;
3191 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3192
3193 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3194 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3195 }
3196 }
3197
3198 /* If this event was not handled before, and is not a SIGTRAP, we
3199 report it. SIGILL and SIGSEGV are also treated as traps in case
3200 a breakpoint is inserted at the current PC. If this target does
3201 not support internal breakpoints at all, we also report the
3202 SIGTRAP without further processing; it's of no concern to us. */
3203 maybe_internal_trap
3204 = (supports_breakpoints ()
3205 && (WSTOPSIG (w) == SIGTRAP
3206 || ((WSTOPSIG (w) == SIGILL
3207 || WSTOPSIG (w) == SIGSEGV)
3208 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3209
3210 if (maybe_internal_trap)
3211 {
3212 /* Handle anything that requires bookkeeping before deciding to
3213 report the event or continue waiting. */
3214
3215 /* First check if we can explain the SIGTRAP with an internal
3216 breakpoint, or if we should possibly report the event to GDB.
3217 Do this before anything that may remove or insert a
3218 breakpoint. */
3219 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3220
3221 /* We have a SIGTRAP, possibly a step-over dance has just
3222 finished. If so, tweak the state machine accordingly,
3223 reinsert breakpoints and delete any single-step
3224 breakpoints. */
3225 step_over_finished = finish_step_over (event_child);
3226
3227 /* Now invoke the callbacks of any internal breakpoints there. */
3228 check_breakpoints (event_child->stop_pc);
3229
3230 /* Handle tracepoint data collecting. This may overflow the
3231 trace buffer, and cause a tracing stop, removing
3232 breakpoints. */
3233 trace_event = handle_tracepoints (event_child);
3234
3235 if (bp_explains_trap)
3236 {
3237 if (debug_threads)
3238 debug_printf ("Hit a gdbserver breakpoint.\n");
3239 }
3240 }
3241 else
3242 {
3243 /* We have some other signal, possibly a step-over dance was in
3244 progress, and it should be cancelled too. */
3245 step_over_finished = finish_step_over (event_child);
3246 }
3247
3248 /* We have all the data we need. Either report the event to GDB, or
3249 resume threads and keep waiting for more. */
3250
3251 /* If we're collecting a fast tracepoint, finish the collection and
3252 move out of the jump pad before delivering a signal. See
3253 linux_stabilize_threads. */
3254
3255 if (WIFSTOPPED (w)
3256 && WSTOPSIG (w) != SIGTRAP
3257 && supports_fast_tracepoints ()
3258 && agent_loaded_p ())
3259 {
3260 if (debug_threads)
3261 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3262 "to defer or adjust it.\n",
3263 WSTOPSIG (w), lwpid_of (current_thread));
3264
3265 /* Allow debugging the jump pad itself. */
3266 if (current_thread->last_resume_kind != resume_step
3267 && maybe_move_out_of_jump_pad (event_child, &w))
3268 {
3269 enqueue_one_deferred_signal (event_child, &w);
3270
3271 if (debug_threads)
3272 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3273 WSTOPSIG (w), lwpid_of (current_thread));
3274
3275 linux_resume_one_lwp (event_child, 0, 0, NULL);
3276
3277 if (debug_threads)
3278 debug_exit ();
3279 return ignore_event (ourstatus);
3280 }
3281 }
3282
3283 if (event_child->collecting_fast_tracepoint
3284 != fast_tpoint_collect_result::not_collecting)
3285 {
3286 if (debug_threads)
3287 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3288 "Check if we're already there.\n",
3289 lwpid_of (current_thread),
3290 (int) event_child->collecting_fast_tracepoint);
3291
3292 trace_event = 1;
3293
3294 event_child->collecting_fast_tracepoint
3295 = linux_fast_tracepoint_collecting (event_child, NULL);
3296
3297 if (event_child->collecting_fast_tracepoint
3298 != fast_tpoint_collect_result::before_insn)
3299 {
3300 /* No longer need this breakpoint. */
3301 if (event_child->exit_jump_pad_bkpt != NULL)
3302 {
3303 if (debug_threads)
3304 debug_printf ("No longer need exit-jump-pad bkpt; removing it, "
3305 "stopping all threads momentarily.\n");
3306
3307 /* Other running threads could hit this breakpoint.
3308 We don't handle moribund locations like GDB does,
3309 instead we always pause all threads when removing
3310 breakpoints, so that any step-over or
3311 decr_pc_after_break adjustment is always taken
3312 care of while the breakpoint is still
3313 inserted. */
3314 stop_all_lwps (1, event_child);
3315
3316 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3317 event_child->exit_jump_pad_bkpt = NULL;
3318
3319 unstop_all_lwps (1, event_child);
3320
3321 gdb_assert (event_child->suspended >= 0);
3322 }
3323 }
3324
3325 if (event_child->collecting_fast_tracepoint
3326 == fast_tpoint_collect_result::not_collecting)
3327 {
3328 if (debug_threads)
3329 debug_printf ("fast tracepoint finished "
3330 "collecting successfully.\n");
3331
3332 /* We may have a deferred signal to report. */
3333 if (dequeue_one_deferred_signal (event_child, &w))
3334 {
3335 if (debug_threads)
3336 debug_printf ("dequeued one signal.\n");
3337 }
3338 else
3339 {
3340 if (debug_threads)
3341 debug_printf ("no deferred signals.\n");
3342
3343 if (stabilizing_threads)
3344 {
3345 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3346 ourstatus->value.sig = GDB_SIGNAL_0;
3347
3348 if (debug_threads)
3349 {
3350 debug_printf ("wait_1 ret = %s, stopped "
3351 "while stabilizing threads\n",
3352 target_pid_to_str (ptid_of (current_thread)));
3353 debug_exit ();
3354 }
3355
3356 return ptid_of (current_thread);
3357 }
3358 }
3359 }
3360 }
3361
3362 /* Check whether GDB would be interested in this event. */
3363
3364 /* Check if GDB is interested in this syscall. */
3365 if (WIFSTOPPED (w)
3366 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3367 && !gdb_catch_this_syscall_p (event_child))
3368 {
3369 if (debug_threads)
3370 {
3371 debug_printf ("Ignored syscall for LWP %ld.\n",
3372 lwpid_of (current_thread));
3373 }
3374
3375 linux_resume_one_lwp (event_child, event_child->stepping,
3376 0, NULL);
3377
3378 if (debug_threads)
3379 debug_exit ();
3380 return ignore_event (ourstatus);
3381 }
3382
3383 /* If GDB is not interested in this signal, don't stop other
3384 threads, and don't report it to GDB. Just resume the inferior
3385 right away. We do this for threading-related signals as well as
3386 any that GDB specifically requested we ignore. But never ignore
3387 SIGSTOP if we sent it ourselves, and do not ignore signals when
3388 stepping - they may require special handling to skip the signal
3389 handler. Also never ignore signals that could be caused by a
3390 breakpoint. */
3391 if (WIFSTOPPED (w)
3392 && current_thread->last_resume_kind != resume_step
3393 && (
3394 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3395 (current_process ()->priv->thread_db != NULL
3396 && (WSTOPSIG (w) == __SIGRTMIN
3397 || WSTOPSIG (w) == __SIGRTMIN + 1))
3398 ||
3399 #endif
3400 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3401 && !(WSTOPSIG (w) == SIGSTOP
3402 && current_thread->last_resume_kind == resume_stop)
3403 && !linux_wstatus_maybe_breakpoint (w))))
3404 {
3405 siginfo_t info, *info_p;
3406
3407 if (debug_threads)
3408 debug_printf ("Ignored signal %d for LWP %ld.\n",
3409 WSTOPSIG (w), lwpid_of (current_thread));
3410
3411 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3412 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3413 info_p = &info;
3414 else
3415 info_p = NULL;
3416
3417 if (step_over_finished)
3418 {
3419 /* We cancelled this thread's step-over above. We still
3420 need to unsuspend all other LWPs, and set them back
3421 running again while the signal handler runs. */
3422 unsuspend_all_lwps (event_child);
3423
3424 /* Enqueue the pending signal info so that proceed_all_lwps
3425 doesn't lose it. */
3426 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3427
3428 proceed_all_lwps ();
3429 }
3430 else
3431 {
3432 linux_resume_one_lwp (event_child, event_child->stepping,
3433 WSTOPSIG (w), info_p);
3434 }
3435
3436 if (debug_threads)
3437 debug_exit ();
3438
3439 return ignore_event (ourstatus);
3440 }
3441
3442 /* Note that all addresses are always "out of the step range" when
3443 there's no range to begin with. */
3444 in_step_range = lwp_in_step_range (event_child);
3445
3446 /* If GDB wanted this thread to single step, and the thread is out
3447 of the step range, we always want to report the SIGTRAP, and let
3448 GDB handle it. Watchpoints should always be reported. So should
3449 signals we can't explain. A SIGTRAP we can't explain could be a
3450 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3451 do, we'll be able to handle GDB breakpoints on top of internal
3452 breakpoints, by handling the internal breakpoint and still
3453 reporting the event to GDB. If we don't, we're out of luck, GDB
3454 won't see the breakpoint hit. If we see a single-step event but
3455 the thread should be continuing, don't pass the trap to gdb.
3456 That indicates that we had previously finished a single-step but
3457 left the single-step pending -- see
3458 complete_ongoing_step_over. */
3459 report_to_gdb = (!maybe_internal_trap
3460 || (current_thread->last_resume_kind == resume_step
3461 && !in_step_range)
3462 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3463 || (!in_step_range
3464 && !bp_explains_trap
3465 && !trace_event
3466 && !step_over_finished
3467 && !(current_thread->last_resume_kind == resume_continue
3468 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3469 || (gdb_breakpoint_here (event_child->stop_pc)
3470 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3471 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3472 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3473
3474 run_breakpoint_commands (event_child->stop_pc);
3475
3476 /* We found no reason GDB would want us to stop. We either hit one
3477 of our own breakpoints, or finished an internal step GDB
3478 shouldn't know about. */
3479 if (!report_to_gdb)
3480 {
3481 if (debug_threads)
3482 {
3483 if (bp_explains_trap)
3484 debug_printf ("Hit a gdbserver breakpoint.\n");
3485 if (step_over_finished)
3486 debug_printf ("Step-over finished.\n");
3487 if (trace_event)
3488 debug_printf ("Tracepoint event.\n");
3489 if (lwp_in_step_range (event_child))
3490 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3491 paddress (event_child->stop_pc),
3492 paddress (event_child->step_range_start),
3493 paddress (event_child->step_range_end));
3494 }
3495
3496 /* We're not reporting this breakpoint to GDB, so apply the
3497 decr_pc_after_break adjustment to the inferior's regcache
3498 ourselves. */
3499
3500 if (the_low_target.set_pc != NULL)
3501 {
3502 struct regcache *regcache
3503 = get_thread_regcache (current_thread, 1);
3504 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3505 }
3506
3507 if (step_over_finished)
3508 {
3509 /* If we have finished stepping over a breakpoint, we've
3510 stopped and suspended all LWPs momentarily except the
3511 stepping one. This is where we resume them all again.
3512 We're going to keep waiting, so use proceed, which
3513 handles stepping over the next breakpoint. */
3514 unsuspend_all_lwps (event_child);
3515 }
3516 else
3517 {
3518 /* Remove the single-step breakpoints if any. Note that
3519 there isn't single-step breakpoint if we finished stepping
3520 over. */
3521 if (can_software_single_step ()
3522 && has_single_step_breakpoints (current_thread))
3523 {
3524 stop_all_lwps (0, event_child);
3525 delete_single_step_breakpoints (current_thread);
3526 unstop_all_lwps (0, event_child);
3527 }
3528 }
3529
3530 if (debug_threads)
3531 debug_printf ("proceeding all threads.\n");
3532 proceed_all_lwps ();
3533
3534 if (debug_threads)
3535 debug_exit ();
3536
3537 return ignore_event (ourstatus);
3538 }
3539
3540 if (debug_threads)
3541 {
3542 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3543 {
3544 std::string str
3545 = target_waitstatus_to_string (&event_child->waitstatus);
3546
3547 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3548 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3549 }
3550 if (current_thread->last_resume_kind == resume_step)
3551 {
3552 if (event_child->step_range_start == event_child->step_range_end)
3553 debug_printf ("GDB wanted to single-step, reporting event.\n");
3554 else if (!lwp_in_step_range (event_child))
3555 debug_printf ("Out of step range, reporting event.\n");
3556 }
3557 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3558 debug_printf ("Stopped by watchpoint.\n");
3559 else if (gdb_breakpoint_here (event_child->stop_pc))
3560 debug_printf ("Stopped by GDB breakpoint.\n");
3561 debug_printf ("Hit a non-gdbserver trap event.\n");
3563 }
3564
3565 /* Alright, we're going to report a stop. */
3566
3567 /* Remove single-step breakpoints. */
3568 if (can_software_single_step ())
3569 {
3570 /* Decide whether to remove the single-step breakpoints. If we do,
3571 stop all lwps first, so that other threads won't hit a breakpoint
3572 in stale memory. */
3573 int remove_single_step_breakpoints_p = 0;
3574
3575 if (non_stop)
3576 {
3577 remove_single_step_breakpoints_p
3578 = has_single_step_breakpoints (current_thread);
3579 }
3580 else
3581 {
3582 /* In all-stop, a stop reply cancels all previous resume
3583 requests. Delete all single-step breakpoints. */
3584
3585 find_thread ([&] (thread_info *thread) {
3586 if (has_single_step_breakpoints (thread))
3587 {
3588 remove_single_step_breakpoints_p = 1;
3589 return true;
3590 }
3591
3592 return false;
3593 });
3594 }
3595
3596 if (remove_single_step_breakpoints_p)
3597 {
3598 /* If we remove single-step breakpoints from memory, stop all lwps,
3599 so that other threads won't hit a breakpoint in stale
3600 memory. */
3601 stop_all_lwps (0, event_child);
3602
3603 if (non_stop)
3604 {
3605 gdb_assert (has_single_step_breakpoints (current_thread));
3606 delete_single_step_breakpoints (current_thread);
3607 }
3608 else
3609 {
3610 for_each_thread ([] (thread_info *thread){
3611 if (has_single_step_breakpoints (thread))
3612 delete_single_step_breakpoints (thread);
3613 });
3614 }
3615
3616 unstop_all_lwps (0, event_child);
3617 }
3618 }
3619
3620 if (!stabilizing_threads)
3621 {
3622 /* In all-stop, stop all threads. */
3623 if (!non_stop)
3624 stop_all_lwps (0, NULL);
3625
3626 if (step_over_finished)
3627 {
3628 if (!non_stop)
3629 {
3630 /* If we were doing a step-over, all other threads but
3631 the stepping one had been paused in start_step_over,
3632 with their suspend counts incremented. We don't want
3633 to do a full unstop/unpause, because we're in
3634 all-stop mode (so we want threads stopped), but we
3635 still need to unsuspend the other threads, to
3636 decrement their `suspended' count back. */
3637 unsuspend_all_lwps (event_child);
3638 }
3639 else
3640 {
3641 /* If we just finished a step-over, then all threads had
3642 been momentarily paused. In all-stop, that's fine,
3643 we want threads stopped by now anyway. In non-stop,
3644 we need to re-resume threads that GDB wanted to be
3645 running. */
3646 unstop_all_lwps (1, event_child);
3647 }
3648 }
3649
3650 /* If we're not waiting for a specific LWP, choose an event LWP
3651 from among those that have had events. Giving equal priority
3652 to all LWPs that have had events helps prevent
3653 starvation. */
3654 if (ptid == minus_one_ptid)
3655 {
3656 event_child->status_pending_p = 1;
3657 event_child->status_pending = w;
3658
3659 select_event_lwp (&event_child);
3660
3661 /* current_thread and event_child must stay in sync. */
3662 current_thread = get_lwp_thread (event_child);
3663
3664 event_child->status_pending_p = 0;
3665 w = event_child->status_pending;
3666 }
3667
3669 /* Stabilize threads (move out of jump pads). */
3670 if (!non_stop)
3671 target_stabilize_threads ();
3672 }
3673 else
3674 {
3675 /* If we just finished a step-over, then all threads had been
3676 momentarily paused. In all-stop, that's fine, we want
3677 threads stopped by now anyway. In non-stop, we need to
3678 re-resume threads that GDB wanted to be running. */
3679 if (step_over_finished)
3680 unstop_all_lwps (1, event_child);
3681 }
3682
3683 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3684 {
3685 /* If the reported event is an exit, fork, vfork or exec, let
3686 GDB know. */
3687
3688 /* Break the unreported fork relationship chain. */
3689 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3690 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3691 {
3692 event_child->fork_relative->fork_relative = NULL;
3693 event_child->fork_relative = NULL;
3694 }
3695
3696 *ourstatus = event_child->waitstatus;
3697 /* Clear the event lwp's waitstatus since we handled it already. */
3698 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3699 }
3700 else
3701 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3702
3703 /* Now that we've selected our final event LWP, un-adjust its PC if
3704 it was a software breakpoint, and the client doesn't know we can
3705 adjust the breakpoint ourselves. */
3706 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3707 && !cs.swbreak_feature)
3708 {
3709 int decr_pc = the_low_target.decr_pc_after_break;
3710
3711 if (decr_pc != 0)
3712 {
3713 struct regcache *regcache
3714 = get_thread_regcache (current_thread, 1);
3715 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3716 }
3717 }
3718
3719 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3720 {
3721 get_syscall_trapinfo (event_child,
3722 &ourstatus->value.syscall_number);
3723 ourstatus->kind = event_child->syscall_state;
3724 }
3725 else if (current_thread->last_resume_kind == resume_stop
3726 && WSTOPSIG (w) == SIGSTOP)
3727 {
3728 /* The thread was requested to stop by GDB with vCont;t, and it
3729 stopped cleanly, so report it as stopped with SIG0. The use of
3730 SIGSTOP is an implementation detail. */
3731 ourstatus->value.sig = GDB_SIGNAL_0;
3732 }
3733 else if (current_thread->last_resume_kind == resume_stop
3734 && WSTOPSIG (w) != SIGSTOP)
3735 {
3736 /* The thread was requested to stop by GDB with vCont;t, but it
3737 stopped for some other reason; report that signal. */
3738 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3739 }
3740 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3741 {
3742 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3743 }
3744
3745 gdb_assert (step_over_bkpt == null_ptid);
3746
3747 if (debug_threads)
3748 {
3749 debug_printf ("wait_1 ret = %s, %d, %d\n",
3750 target_pid_to_str (ptid_of (current_thread)),
3751 ourstatus->kind, ourstatus->value.sig);
3752 debug_exit ();
3753 }
3754
3755 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3756 return filter_exit_event (event_child, ourstatus);
3757
3758 return ptid_of (current_thread);
3759 }
3760
3761 /* Get rid of any pending event in the pipe. */
3762 static void
3763 async_file_flush (void)
3764 {
3765 int ret;
3766 char buf;
3767
3768 do
3769 ret = read (linux_event_pipe[0], &buf, 1);
3770 while (ret >= 0 || (ret == -1 && errno == EINTR));
3771 }
3772
3773 /* Put something in the pipe, so the event loop wakes up. */
3774 static void
3775 async_file_mark (void)
3776 {
3777 int ret;
3778
3779 async_file_flush ();
3780
3781 do
3782 ret = write (linux_event_pipe[1], "+", 1);
3783 while (ret == 0 || (ret == -1 && errno == EINTR));
3784
3785 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3786 be awakened anyway. */
3787 }
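/* Illustration, not part of gdbserver: the two helpers above are the
   classic self-pipe pattern. A minimal standalone sketch, assuming a
   hypothetical `event_pipe' whose ends were made O_NONBLOCK, could be:

     #include <errno.h>
     #include <unistd.h>

     static int event_pipe[2];   // read end is polled by the event loop

     static void
     wake_event_loop (void)
     {
       char token = '+';
       ssize_t n;

       // Retry on EINTR; a full pipe (EAGAIN) already wakes the loop.
       do
         n = write (event_pipe[1], &token, 1);
       while (n == -1 && errno == EINTR);
     }

   One byte in the pipe is enough: the event loop sees the read end
   become readable and calls back into wait.  */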
3788
3789 ptid_t
3790 linux_process_target::wait (ptid_t ptid,
3791 target_waitstatus *ourstatus,
3792 int target_options)
3793 {
3794 ptid_t event_ptid;
3795
3796 /* Flush the async file first. */
3797 if (target_is_async_p ())
3798 async_file_flush ();
3799
3800 do
3801 {
3802 event_ptid = wait_1 (ptid, ourstatus, target_options);
3803 }
3804 while ((target_options & TARGET_WNOHANG) == 0
3805 && event_ptid == null_ptid
3806 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3807
3808 /* If at least one stop was reported, there may be more. A single
3809 SIGCHLD can signal more than one child stop. */
3810 if (target_is_async_p ()
3811 && (target_options & TARGET_WNOHANG) != 0
3812 && event_ptid != null_ptid)
3813 async_file_mark ();
3814
3815 return event_ptid;
3816 }
3817
3818 /* Send a signal to an LWP. */
3819
3820 static int
3821 kill_lwp (unsigned long lwpid, int signo)
3822 {
3823 int ret;
3824
3825 errno = 0;
3826 ret = syscall (__NR_tkill, lwpid, signo);
3827 if (errno == ENOSYS)
3828 {
3829 /* If tkill fails, then we are not using nptl threads, a
3830 configuration we no longer support. */
3831 perror_with_name (("tkill"));
3832 }
3833 return ret;
3834 }
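/* Illustration, not part of gdbserver: glibc provides no wrapper for
   tkill, so it must be reached via syscall(2). A hedged sketch; the
   helper name `stop_one_thread' is purely hypothetical:

     #include <signal.h>
     #include <sys/syscall.h>
     #include <sys/types.h>
     #include <unistd.h>

     static int
     stop_one_thread (pid_t tid)
     {
       // tkill directs the signal at a single thread (LWP), unlike
       // kill, which targets the whole thread group.
       return syscall (__NR_tkill, tid, SIGSTOP);
     }

   Current kernels also offer tgkill (tgid, tid, sig), which avoids a
   race if the thread ID is recycled by another process.  */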
3835
3836 void
3837 linux_stop_lwp (struct lwp_info *lwp)
3838 {
3839 send_sigstop (lwp);
3840 }
3841
3842 static void
3843 send_sigstop (struct lwp_info *lwp)
3844 {
3845 int pid;
3846
3847 pid = lwpid_of (get_lwp_thread (lwp));
3848
3849 /* If we already have a pending stop signal for this LWP, don't
3850 send another. */
3851 if (lwp->stop_expected)
3852 {
3853 if (debug_threads)
3854 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3855
3856 return;
3857 }
3858
3859 if (debug_threads)
3860 debug_printf ("Sending sigstop to lwp %d\n", pid);
3861
3862 lwp->stop_expected = 1;
3863 kill_lwp (pid, SIGSTOP);
3864 }
3865
3866 static void
3867 send_sigstop (thread_info *thread, lwp_info *except)
3868 {
3869 struct lwp_info *lwp = get_thread_lwp (thread);
3870
3871 /* Ignore EXCEPT. */
3872 if (lwp == except)
3873 return;
3874
3875 if (lwp->stopped)
3876 return;
3877
3878 send_sigstop (lwp);
3879 }
3880
3881 /* Increment the suspend count of an LWP, and stop it, if not stopped
3882 yet. */
3883 static void
3884 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3885 {
3886 struct lwp_info *lwp = get_thread_lwp (thread);
3887
3888 /* Ignore EXCEPT. */
3889 if (lwp == except)
3890 return;
3891
3892 lwp_suspended_inc (lwp);
3893
3894 send_sigstop (thread, except);
3895 }
3896
3897 static void
3898 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3899 {
3900 /* Store the exit status for later. */
3901 lwp->status_pending_p = 1;
3902 lwp->status_pending = wstat;
3903
3904 /* Store in waitstatus as well, as there's nothing else to process
3905 for this event. */
3906 if (WIFEXITED (wstat))
3907 {
3908 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3909 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3910 }
3911 else if (WIFSIGNALED (wstat))
3912 {
3913 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3914 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3915 }
3916
3917 /* Prevent trying to stop it. */
3918 lwp->stopped = 1;
3919
3920 /* No further stops are expected from a dead lwp. */
3921 lwp->stop_expected = 0;
3922 }
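/* Illustration, not part of gdbserver: the decoding above follows the
   standard <sys/wait.h> protocol. A minimal sketch of the same
   dispatch (the function name is hypothetical):

     #include <stdio.h>
     #include <sys/wait.h>

     static void
     describe_wstat (int wstat)
     {
       if (WIFEXITED (wstat))
         printf ("exited, status %d\n", WEXITSTATUS (wstat));
       else if (WIFSIGNALED (wstat))
         printf ("killed by signal %d\n", WTERMSIG (wstat));
       else if (WIFSTOPPED (wstat))
         printf ("stopped by signal %d\n", WSTOPSIG (wstat));
     }
*/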
3923
3924 /* Return true if LWP has exited already, and has a pending exit event
3925 to report to GDB. */
3926
3927 static int
3928 lwp_is_marked_dead (struct lwp_info *lwp)
3929 {
3930 return (lwp->status_pending_p
3931 && (WIFEXITED (lwp->status_pending)
3932 || WIFSIGNALED (lwp->status_pending)));
3933 }
3934
3935 void
3936 linux_process_target::wait_for_sigstop ()
3937 {
3938 struct thread_info *saved_thread;
3939 ptid_t saved_tid;
3940 int wstat;
3941 int ret;
3942
3943 saved_thread = current_thread;
3944 if (saved_thread != NULL)
3945 saved_tid = saved_thread->id;
3946 else
3947 saved_tid = null_ptid; /* avoid bogus unused warning */
3948
3949 if (debug_threads)
3950 debug_printf ("wait_for_sigstop: pulling events\n");
3951
3952 /* Passing NULL_PTID as filter indicates we want all events to be
3953 left pending. Eventually this returns when there are no
3954 unwaited-for children left. */
3955 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3956 gdb_assert (ret == -1);
3957
3958 if (saved_thread == NULL || mythread_alive (saved_tid))
3959 current_thread = saved_thread;
3960 else
3961 {
3962 if (debug_threads)
3963 debug_printf ("Previously current thread died.\n");
3964
3965 /* We can't change the current inferior behind GDB's back,
3966 otherwise, a subsequent command may apply to the wrong
3967 process. */
3968 current_thread = NULL;
3969 }
3970 }
3971
3972 /* Returns true if THREAD is stopped in a jump pad, and we can't
3973 move it out, because we need to report the stop event to GDB. For
3974 example, if the user puts a breakpoint in the jump pad, it's
3975 because she wants to debug it. */
3976
3977 static bool
3978 stuck_in_jump_pad_callback (thread_info *thread)
3979 {
3980 struct lwp_info *lwp = get_thread_lwp (thread);
3981
3982 if (lwp->suspended != 0)
3983 {
3984 internal_error (__FILE__, __LINE__,
3985 "LWP %ld is suspended, suspended=%d\n",
3986 lwpid_of (thread), lwp->suspended);
3987 }
3988 gdb_assert (lwp->stopped);
3989
3990 /* Allow debugging the jump pad, gdb_collect, etc. */
3991 return (supports_fast_tracepoints ()
3992 && agent_loaded_p ()
3993 && (gdb_breakpoint_here (lwp->stop_pc)
3994 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3995 || thread->last_resume_kind == resume_step)
3996 && (linux_fast_tracepoint_collecting (lwp, NULL)
3997 != fast_tpoint_collect_result::not_collecting));
3998 }
3999
4000 void
4001 linux_process_target::move_out_of_jump_pad (thread_info *thread)
4002 {
4003 struct thread_info *saved_thread;
4004 struct lwp_info *lwp = get_thread_lwp (thread);
4005 int *wstat;
4006
4007 if (lwp->suspended != 0)
4008 {
4009 internal_error (__FILE__, __LINE__,
4010 "LWP %ld is suspended, suspended=%d\n",
4011 lwpid_of (thread), lwp->suspended);
4012 }
4013 gdb_assert (lwp->stopped);
4014
4015 /* For gdb_breakpoint_here. */
4016 saved_thread = current_thread;
4017 current_thread = thread;
4018
4019 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4020
4021 /* Allow debugging the jump pad, gdb_collect, etc. */
4022 if (!gdb_breakpoint_here (lwp->stop_pc)
4023 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4024 && thread->last_resume_kind != resume_step
4025 && maybe_move_out_of_jump_pad (lwp, wstat))
4026 {
4027 if (debug_threads)
4028 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4029 lwpid_of (thread));
4030
4031 if (wstat)
4032 {
4033 lwp->status_pending_p = 0;
4034 enqueue_one_deferred_signal (lwp, wstat);
4035
4036 if (debug_threads)
4037 debug_printf ("Signal %d for LWP %ld deferred "
4038 "(in jump pad)\n",
4039 WSTOPSIG (*wstat), lwpid_of (thread));
4040 }
4041
4042 linux_resume_one_lwp (lwp, 0, 0, NULL);
4043 }
4044 else
4045 lwp_suspended_inc (lwp);
4046
4047 current_thread = saved_thread;
4048 }
4049
4050 static bool
4051 lwp_running (thread_info *thread)
4052 {
4053 struct lwp_info *lwp = get_thread_lwp (thread);
4054
4055 if (lwp_is_marked_dead (lwp))
4056 return false;
4057
4058 return !lwp->stopped;
4059 }
4060
4061 void
4062 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4063 {
4064 /* Should not be called recursively. */
4065 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4066
4067 if (debug_threads)
4068 {
4069 debug_enter ();
4070 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4071 suspend ? "stop-and-suspend" : "stop",
4072 except != NULL
4073 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4074 : "none");
4075 }
4076
4077 stopping_threads = (suspend
4078 ? STOPPING_AND_SUSPENDING_THREADS
4079 : STOPPING_THREADS);
4080
4081 if (suspend)
4082 for_each_thread ([&] (thread_info *thread)
4083 {
4084 suspend_and_send_sigstop (thread, except);
4085 });
4086 else
4087 for_each_thread ([&] (thread_info *thread)
4088 {
4089 send_sigstop (thread, except);
4090 });
4091
4092 wait_for_sigstop ();
4093 stopping_threads = NOT_STOPPING_THREADS;
4094
4095 if (debug_threads)
4096 {
4097 debug_printf ("stop_all_lwps done, setting stopping_threads "
4098 "back to !stopping\n");
4099 debug_exit ();
4100 }
4101 }
4102
4103 /* Enqueue one signal in the chain of signals which need to be
4104 delivered to this process on next resume. */
4105
4106 static void
4107 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4108 {
4109 struct pending_signals *p_sig = XNEW (struct pending_signals);
4110
4111 p_sig->prev = lwp->pending_signals;
4112 p_sig->signal = signal;
4113 if (info == NULL)
4114 memset (&p_sig->info, 0, sizeof (siginfo_t));
4115 else
4116 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4117 lwp->pending_signals = p_sig;
4118 }
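/* Illustration, not part of gdbserver: new entries are pushed at the
   head of the `pending_signals' chain, but consumers (see
   linux_resume_one_lwp_throw below) follow the `prev' links to the
   tail, so delivery is oldest-first. A hedged sketch of that tail
   walk, assuming the chain is non-empty as in the caller:

     #include <stdlib.h>

     struct pending_signals { int signal; struct pending_signals *prev; };

     static int
     dequeue_oldest (struct pending_signals **head)
     {
       struct pending_signals **p = head;
       int sig;

       while ((*p)->prev != NULL)   // walk to the oldest entry
         p = &(*p)->prev;
       sig = (*p)->signal;
       free (*p);
       *p = NULL;
       return sig;
     }
*/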
4119
4120 /* Install breakpoints for software single stepping. */
4121
4122 static void
4123 install_software_single_step_breakpoints (struct lwp_info *lwp)
4124 {
4125 struct thread_info *thread = get_lwp_thread (lwp);
4126 struct regcache *regcache = get_thread_regcache (thread, 1);
4127
4128 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4129
4130 current_thread = thread;
4131 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4132
4133 for (CORE_ADDR pc : next_pcs)
4134 set_single_step_breakpoint (pc, current_ptid);
4135 }
4136
4137 /* Single-step via hardware or software single-step.
4138 Return 1 if hardware single-stepping, or 0 if software
4139 single-stepping or if the target cannot single-step at all. */
4140
4141 static int
4142 single_step (struct lwp_info* lwp)
4143 {
4144 int step = 0;
4145
4146 if (can_hardware_single_step ())
4147 {
4148 step = 1;
4149 }
4150 else if (can_software_single_step ())
4151 {
4152 install_software_single_step_breakpoints (lwp);
4153 step = 0;
4154 }
4155 else
4156 {
4157 if (debug_threads)
4158 debug_printf ("stepping is not implemented on this target\n");
4159 }
4160
4161 return step;
4162 }
4163
4164 /* The signal can be delivered to the inferior if we are not trying to
4165 finish a fast tracepoint collect. Since a signal can be delivered
4166 during the step-over, the program may enter the signal handler and
4167 trap again after returning from it. We can live with the spurious
4168 double traps. */
4169
4170 static int
4171 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4172 {
4173 return (lwp->collecting_fast_tracepoint
4174 == fast_tpoint_collect_result::not_collecting);
4175 }
4176
4177 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4178 SIGNAL is nonzero, give it that signal. */
4179
4180 static void
4181 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4182 int step, int signal, siginfo_t *info)
4183 {
4184 struct thread_info *thread = get_lwp_thread (lwp);
4185 struct thread_info *saved_thread;
4186 int ptrace_request;
4187 struct process_info *proc = get_thread_process (thread);
4188
4189 /* Note that the target description may not be initialised
4190 (proc->tdesc == NULL) at this point because the program hasn't
4191 stopped at the first instruction yet. This means GDBserver skips
4192 the extra traps from the wrapper program (see option --wrapper).
4193 Code in this function that requires register access should be
4194 guarded by a check that proc->tdesc is non-NULL, or similar. */
4195
4196 if (lwp->stopped == 0)
4197 return;
4198
4199 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4200
4201 fast_tpoint_collect_result fast_tp_collecting
4202 = lwp->collecting_fast_tracepoint;
4203
4204 gdb_assert (!stabilizing_threads
4205 || (fast_tp_collecting
4206 != fast_tpoint_collect_result::not_collecting));
4207
4208 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4209 user used the "jump" command, or "set $pc = foo"). */
4210 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4211 {
4212 /* Collecting 'while-stepping' actions doesn't make sense
4213 anymore. */
4214 release_while_stepping_state_list (thread);
4215 }
4216
4217 /* If we have pending signals or status, and a new signal, enqueue the
4218 signal. Also enqueue the signal if it can't be delivered to the
4219 inferior right now. */
4220 if (signal != 0
4221 && (lwp->status_pending_p
4222 || lwp->pending_signals != NULL
4223 || !lwp_signal_can_be_delivered (lwp)))
4224 {
4225 enqueue_pending_signal (lwp, signal, info);
4226
4227 /* Postpone any pending signal. It was enqueued above. */
4228 signal = 0;
4229 }
4230
4231 if (lwp->status_pending_p)
4232 {
4233 if (debug_threads)
4234 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4235 " has pending status\n",
4236 lwpid_of (thread), step ? "step" : "continue",
4237 lwp->stop_expected ? "expected" : "not expected");
4238 return;
4239 }
4240
4241 saved_thread = current_thread;
4242 current_thread = thread;
4243
4244 /* This bit needs some thinking about. If we get a signal that
4245 we must report while a single-step reinsert is still pending,
4246 we often end up resuming the thread. It might be better to
4247 (ew) allow a stack of pending events; then we could be sure that
4248 the reinsert happened right away and not lose any signals.
4249
4250 Making this stack would also shrink the window in which breakpoints are
4251 uninserted (see comment in linux_wait_for_lwp) but not enough for
4252 complete correctness, so it won't solve that problem. It may be
4253 worthwhile just to solve this one, however. */
4254 if (lwp->bp_reinsert != 0)
4255 {
4256 if (debug_threads)
4257 debug_printf (" pending reinsert at 0x%s\n",
4258 paddress (lwp->bp_reinsert));
4259
4260 if (can_hardware_single_step ())
4261 {
4262 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4263 {
4264 if (step == 0)
4265 warning ("BAD - reinserting but not stepping.");
4266 if (lwp->suspended)
4267 warning ("BAD - reinserting and suspended(%d).",
4268 lwp->suspended);
4269 }
4270 }
4271
4272 step = maybe_hw_step (thread);
4273 }
4274
4275 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4276 {
4277 if (debug_threads)
4278 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4279 " (exit-jump-pad-bkpt)\n",
4280 lwpid_of (thread));
4281 }
4282 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4283 {
4284 if (debug_threads)
4285 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4286 " single-stepping\n",
4287 lwpid_of (thread));
4288
4289 if (can_hardware_single_step ())
4290 step = 1;
4291 else
4292 {
4293 internal_error (__FILE__, __LINE__,
4294 "moving out of jump pad single-stepping"
4295 " not implemented on this target");
4296 }
4297 }
4298
4299 /* If we have while-stepping actions in this thread, set it stepping.
4300 If we have a signal to deliver, it may or may not be set to
4301 SIG_IGN, we don't know. Assume so, and allow collecting
4302 while-stepping into a signal handler. A possible smart thing to
4303 do would be to set an internal breakpoint at the signal return
4304 address, continue, and carry on catching this while-stepping
4305 action only when that breakpoint is hit. A future
4306 enhancement. */
4307 if (thread->while_stepping != NULL)
4308 {
4309 if (debug_threads)
4310 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4311 lwpid_of (thread));
4312
4313 step = single_step (lwp);
4314 }
4315
4316 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4317 {
4318 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4319
4320 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4321
4322 if (debug_threads)
4323 {
4324 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4325 (long) lwp->stop_pc);
4326 }
4327 }
4328
4329 /* If we have pending signals, consume one if it can be delivered to
4330 the inferior. */
4331 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4332 {
4333 struct pending_signals **p_sig;
4334
4335 p_sig = &lwp->pending_signals;
4336 while ((*p_sig)->prev != NULL)
4337 p_sig = &(*p_sig)->prev;
4338
4339 signal = (*p_sig)->signal;
4340 if ((*p_sig)->info.si_signo != 0)
4341 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4342 &(*p_sig)->info);
4343
4344 free (*p_sig);
4345 *p_sig = NULL;
4346 }
4347
4348 if (debug_threads)
4349 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4350 lwpid_of (thread), step ? "step" : "continue", signal,
4351 lwp->stop_expected ? "expected" : "not expected");
4352
4353 if (the_low_target.prepare_to_resume != NULL)
4354 the_low_target.prepare_to_resume (lwp);
4355
4356 regcache_invalidate_thread (thread);
4357 errno = 0;
4358 lwp->stepping = step;
4359 if (step)
4360 ptrace_request = PTRACE_SINGLESTEP;
4361 else if (gdb_catching_syscalls_p (lwp))
4362 ptrace_request = PTRACE_SYSCALL;
4363 else
4364 ptrace_request = PTRACE_CONT;
4365 ptrace (ptrace_request,
4366 lwpid_of (thread),
4367 (PTRACE_TYPE_ARG3) 0,
4368 /* Coerce to a uintptr_t first to avoid potential gcc warning
4369 of coercing an 8 byte integer to a 4 byte pointer. */
4370 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4371
4372 current_thread = saved_thread;
4373 if (errno)
4374 perror_with_name ("resuming thread");
4375
4376 /* Successfully resumed. Clear state that no longer makes sense,
4377 and mark the LWP as running. Must not do this before resuming
4378 otherwise if that fails other code will be confused. E.g., we'd
4379 later try to stop the LWP and hang forever waiting for a stop
4380 status. Note that we must not throw after this is cleared,
4381 otherwise handle_zombie_lwp_error would get confused. */
4382 lwp->stopped = 0;
4383 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4384 }
4385
4386 /* Called when we try to resume a stopped LWP and that errors out. If
4387 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4388 or about to become), discard the error, clear any pending status
4389 the LWP may have, and return true (we'll collect the exit status
4390 soon enough). Otherwise, return false. */
4391
4392 static int
4393 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4394 {
4395 struct thread_info *thread = get_lwp_thread (lp);
4396
4397 /* If we get an error after resuming the LWP successfully, we'd
4398 confuse !T state for the LWP being gone. */
4399 gdb_assert (lp->stopped);
4400
4401 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4402 because even if ptrace failed with ESRCH, the tracee may be "not
4403 yet fully dead", but already refusing ptrace requests. In that
4404 case the tracee has 'R (Running)' state for a little bit
4405 (observed in Linux 3.18). See also the note on ESRCH in the
4406 ptrace(2) man page. Instead, check whether the LWP has any state
4407 other than ptrace-stopped. */
4408
4409 /* Don't assume anything if /proc/PID/status can't be read. */
4410 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4411 {
4412 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4413 lp->status_pending_p = 0;
4414 return 1;
4415 }
4416 return 0;
4417 }
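/* Illustration, not part of gdbserver: the predicate used above boils
   down to reading the State: line of /proc/PID/status and testing for
   the ptrace-stop state ('t' on recent kernels; older kernels report
   'T' for both job-control and ptrace stops). A hedged sketch:

     #include <stdio.h>
     #include <string.h>
     #include <sys/types.h>

     static int
     trace_stopped_p (pid_t pid)
     {
       char path[64], line[256];
       int stopped = -1;   // -1 means "can't tell"

       snprintf (path, sizeof path, "/proc/%d/status", (int) pid);
       FILE *f = fopen (path, "r");
       if (f == NULL)
         return -1;
       while (fgets (line, sizeof line, f) != NULL)
         if (strncmp (line, "State:", 6) == 0)
           {
             char state = 0;
             sscanf (line, "State: %c", &state);
             stopped = (state == 't' || state == 'T');
             break;
           }
       fclose (f);
       return stopped;
     }
*/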
4418
4419 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4420 disappears while we try to resume it. */
4421
4422 static void
4423 linux_resume_one_lwp (struct lwp_info *lwp,
4424 int step, int signal, siginfo_t *info)
4425 {
4426 try
4427 {
4428 linux_resume_one_lwp_throw (lwp, step, signal, info);
4429 }
4430 catch (const gdb_exception_error &ex)
4431 {
4432 if (!check_ptrace_stopped_lwp_gone (lwp))
4433 throw;
4434 }
4435 }
4436
4437 /* This function is called once per thread via for_each_thread.
4438 We look up which resume request applies to THREAD and mark it with a
4439 pointer to the appropriate resume request.
4440
4441 This algorithm is O(threads * resume elements), but the number of
4442 resume elements is small (and will remain small at least until GDB
4443 supports thread suspension). */
4444
4445 static void
4446 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4447 {
4448 struct lwp_info *lwp = get_thread_lwp (thread);
4449
4450 for (int ndx = 0; ndx < n; ndx++)
4451 {
4452 ptid_t ptid = resume[ndx].thread;
4453 if (ptid == minus_one_ptid
4454 || ptid == thread->id
4455 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4456 of PID'. */
4457 || (ptid.pid () == pid_of (thread)
4458 && (ptid.is_pid ()
4459 || ptid.lwp () == -1)))
4460 {
4461 if (resume[ndx].kind == resume_stop
4462 && thread->last_resume_kind == resume_stop)
4463 {
4464 if (debug_threads)
4465 debug_printf ("already %s LWP %ld at GDB's request\n",
4466 (thread->last_status.kind
4467 == TARGET_WAITKIND_STOPPED)
4468 ? "stopped"
4469 : "stopping",
4470 lwpid_of (thread));
4471
4472 continue;
4473 }
4474
4475 /* Ignore (wildcard) resume requests for already-resumed
4476 threads. */
4477 if (resume[ndx].kind != resume_stop
4478 && thread->last_resume_kind != resume_stop)
4479 {
4480 if (debug_threads)
4481 debug_printf ("already %s LWP %ld at GDB's request\n",
4482 (thread->last_resume_kind
4483 == resume_step)
4484 ? "stepping"
4485 : "continuing",
4486 lwpid_of (thread));
4487 continue;
4488 }
4489
4490 /* Don't let wildcard resumes resume fork children that GDB
4491 does not yet know are new fork children. */
4492 if (lwp->fork_relative != NULL)
4493 {
4494 struct lwp_info *rel = lwp->fork_relative;
4495
4496 if (rel->status_pending_p
4497 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4498 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4499 {
4500 if (debug_threads)
4501 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4502 lwpid_of (thread));
4503 continue;
4504 }
4505 }
4506
4507 /* If the thread has a pending event that has already been
4508 reported to GDBserver core, but GDB has not pulled the
4509 event out of the vStopped queue yet, likewise, ignore the
4510 (wildcard) resume request. */
4511 if (in_queued_stop_replies (thread->id))
4512 {
4513 if (debug_threads)
4514 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4515 lwpid_of (thread));
4516 continue;
4517 }
4518
4519 lwp->resume = &resume[ndx];
4520 thread->last_resume_kind = lwp->resume->kind;
4521
4522 lwp->step_range_start = lwp->resume->step_range_start;
4523 lwp->step_range_end = lwp->resume->step_range_end;
4524
4525 /* If we had a deferred signal to report, dequeue one now.
4526 This can happen if LWP gets more than one signal while
4527 trying to get out of a jump pad. */
4528 if (lwp->stopped
4529 && !lwp->status_pending_p
4530 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4531 {
4532 lwp->status_pending_p = 1;
4533
4534 if (debug_threads)
4535 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4536 "leaving status pending.\n",
4537 WSTOPSIG (lwp->status_pending),
4538 lwpid_of (thread));
4539 }
4540
4541 return;
4542 }
4543 }
4544
4545 /* No resume action for this thread. */
4546 lwp->resume = NULL;
4547 }
4548
4549 /* find_thread callback for linux_resume. Return true if this lwp has an
4550 interesting status pending. */
4551
4552 static bool
4553 resume_status_pending_p (thread_info *thread)
4554 {
4555 struct lwp_info *lwp = get_thread_lwp (thread);
4556
4557 /* LWPs which will not be resumed are not interesting, because
4558 we might not wait for them next time through linux_wait. */
4559 if (lwp->resume == NULL)
4560 return false;
4561
4562 return thread_still_has_status_pending_p (thread);
4563 }
4564
4565 /* Return true if this lwp, which GDB wants running, is stopped at an
4566 internal breakpoint that we need to step over. This assumes that any
4567 required STOP_PC adjustment has already been propagated to the
4568 inferior's regcache. */
4569
4570 static bool
4571 need_step_over_p (thread_info *thread)
4572 {
4573 struct lwp_info *lwp = get_thread_lwp (thread);
4574 struct thread_info *saved_thread;
4575 CORE_ADDR pc;
4576 struct process_info *proc = get_thread_process (thread);
4577
4578 /* GDBserver is skipping the extra traps from the wrapper program;
4579 no step-over is needed. */
4580 if (proc->tdesc == NULL)
4581 return false;
4582
4583 /* LWPs which will not be resumed are not interesting, because we
4584 might not wait for them next time through linux_wait. */
4585
4586 if (!lwp->stopped)
4587 {
4588 if (debug_threads)
4589 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4590 lwpid_of (thread));
4591 return false;
4592 }
4593
4594 if (thread->last_resume_kind == resume_stop)
4595 {
4596 if (debug_threads)
4597 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4598 " stopped\n",
4599 lwpid_of (thread));
4600 return false;
4601 }
4602
4603 gdb_assert (lwp->suspended >= 0);
4604
4605 if (lwp->suspended)
4606 {
4607 if (debug_threads)
4608 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4609 lwpid_of (thread));
4610 return false;
4611 }
4612
4613 if (lwp->status_pending_p)
4614 {
4615 if (debug_threads)
4616 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4617 " status.\n",
4618 lwpid_of (thread));
4619 return false;
4620 }
4621
4622 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4623 or we have. */
4624 pc = get_pc (lwp);
4625
4626 /* If the PC has changed since we stopped, then don't do anything,
4627 and let the breakpoint/tracepoint be hit. This happens if, for
4628 instance, GDB handled the decr_pc_after_break subtraction itself,
4629 GDB is OOL stepping this thread, or the user has issued a "jump"
4630 command, or poked thread's registers herself. */
4631 if (pc != lwp->stop_pc)
4632 {
4633 if (debug_threads)
4634 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4635 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4636 lwpid_of (thread),
4637 paddress (lwp->stop_pc), paddress (pc));
4638 return false;
4639 }
4640
4641 /* On software single-step targets, resume the inferior with the
4642 signal rather than stepping over. */
4643 if (can_software_single_step ()
4644 && lwp->pending_signals != NULL
4645 && lwp_signal_can_be_delivered (lwp))
4646 {
4647 if (debug_threads)
4648 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4649 " signals.\n",
4650 lwpid_of (thread));
4651
4652 return false;
4653 }
4654
4655 saved_thread = current_thread;
4656 current_thread = thread;
4657
4658 /* We can only step over breakpoints we know about. */
4659 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4660 {
4661 /* Don't step over a breakpoint that GDB expects to hit, though.
4662 But if the condition is being evaluated on the target's side
4663 and it evaluates to false, step over this breakpoint as well. */
4664 if (gdb_breakpoint_here (pc)
4665 && gdb_condition_true_at_breakpoint (pc)
4666 && gdb_no_commands_at_breakpoint (pc))
4667 {
4668 if (debug_threads)
4669 debug_printf ("Need step over [LWP %ld]? yes, but found"
4670 " GDB breakpoint at 0x%s; skipping step over\n",
4671 lwpid_of (thread), paddress (pc));
4672
4673 current_thread = saved_thread;
4674 return false;
4675 }
4676 else
4677 {
4678 if (debug_threads)
4679 debug_printf ("Need step over [LWP %ld]? yes, "
4680 "found breakpoint at 0x%s\n",
4681 lwpid_of (thread), paddress (pc));
4682
4683 /* We've found an lwp that needs stepping over. Return true so
4684 that find_thread stops looking. */
4685 current_thread = saved_thread;
4686
4687 return true;
4688 }
4689 }
4690
4691 current_thread = saved_thread;
4692
4693 if (debug_threads)
4694 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4695 " at 0x%s\n",
4696 lwpid_of (thread), paddress (pc));
4697
4698 return false;
4699 }
4700
4701 void
4702 linux_process_target::start_step_over (lwp_info *lwp)
4703 {
4704 struct thread_info *thread = get_lwp_thread (lwp);
4705 struct thread_info *saved_thread;
4706 CORE_ADDR pc;
4707 int step;
4708
4709 if (debug_threads)
4710 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4711 lwpid_of (thread));
4712
4713 stop_all_lwps (1, lwp);
4714
4715 if (lwp->suspended != 0)
4716 {
4717 internal_error (__FILE__, __LINE__,
4718 "LWP %ld suspended=%d\n", lwpid_of (thread),
4719 lwp->suspended);
4720 }
4721
4722 if (debug_threads)
4723 debug_printf ("Done stopping all threads for step-over.\n");
4724
4725 /* Note, we should always reach here with an already adjusted PC,
4726 either by GDB (if we're resuming due to GDB's request), or by our
4727 caller, if we just finished handling an internal breakpoint GDB
4728 shouldn't care about. */
4729 pc = get_pc (lwp);
4730
4731 saved_thread = current_thread;
4732 current_thread = thread;
4733
4734 lwp->bp_reinsert = pc;
4735 uninsert_breakpoints_at (pc);
4736 uninsert_fast_tracepoint_jumps_at (pc);
4737
4738 step = single_step (lwp);
4739
4740 current_thread = saved_thread;
4741
4742 linux_resume_one_lwp (lwp, step, 0, NULL);
4743
4744 /* Require next event from this LWP. */
4745 step_over_bkpt = thread->id;
4746 }
4747
4748 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4749 start_step_over, if still there, and delete any single-step
4750 breakpoints we've set, on targets without hardware single-step. */
4751
4752 static int
4753 finish_step_over (struct lwp_info *lwp)
4754 {
4755 if (lwp->bp_reinsert != 0)
4756 {
4757 struct thread_info *saved_thread = current_thread;
4758
4759 if (debug_threads)
4760 debug_printf ("Finished step over.\n");
4761
4762 current_thread = get_lwp_thread (lwp);
4763
4764 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4765 may be no breakpoint to reinsert there by now. */
4766 reinsert_breakpoints_at (lwp->bp_reinsert);
4767 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4768
4769 lwp->bp_reinsert = 0;
4770
4771 /* Delete any single-step breakpoints. No longer needed. We
4772 don't have to worry about other threads hitting this trap,
4773 and later not being able to explain it, because we were
4774 stepping over a breakpoint, and we hold all threads but
4775 LWP stopped while doing that. */
4776 if (!can_hardware_single_step ())
4777 {
4778 gdb_assert (has_single_step_breakpoints (current_thread));
4779 delete_single_step_breakpoints (current_thread);
4780 }
4781
4782 step_over_bkpt = null_ptid;
4783 current_thread = saved_thread;
4784 return 1;
4785 }
4786 else
4787 return 0;
4788 }
4789
4790 void
4791 linux_process_target::complete_ongoing_step_over ()
4792 {
4793 if (step_over_bkpt != null_ptid)
4794 {
4795 struct lwp_info *lwp;
4796 int wstat;
4797 int ret;
4798
4799 if (debug_threads)
4800 debug_printf ("detach: step over in progress, finish it first\n");
4801
4802 /* Passing NULL_PTID as filter indicates we want all events to
4803 be left pending. Eventually this returns when there are no
4804 unwaited-for children left. */
4805 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4806 __WALL);
4807 gdb_assert (ret == -1);
4808
4809 lwp = find_lwp_pid (step_over_bkpt);
4810 if (lwp != NULL)
4811 finish_step_over (lwp);
4812 step_over_bkpt = null_ptid;
4813 unsuspend_all_lwps (lwp);
4814 }
4815 }
4816
4817 /* This function is called once per thread. We check the thread's resume
4818 request, which will tell us whether to resume, step, or leave the thread
4819 stopped; and what signal, if any, it should be sent.
4820
4821 For threads which we aren't explicitly told otherwise, we preserve
4822 the stepping flag; this is used for stepping over gdbserver-placed
4823 breakpoints.
4824
4825 If pending_flags was set in any thread, we queue any needed
4826 signals, since we won't actually resume. We already have a pending
4827 event to report, so we don't need to preserve any step requests;
4828 they should be re-issued if necessary. */
4829
4830 static void
4831 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4832 {
4833 struct lwp_info *lwp = get_thread_lwp (thread);
4834 int leave_pending;
4835
4836 if (lwp->resume == NULL)
4837 return;
4838
4839 if (lwp->resume->kind == resume_stop)
4840 {
4841 if (debug_threads)
4842 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4843
4844 if (!lwp->stopped)
4845 {
4846 if (debug_threads)
4847 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4848
4849 /* Stop the thread, and wait for the event asynchronously,
4850 through the event loop. */
4851 send_sigstop (lwp);
4852 }
4853 else
4854 {
4855 if (debug_threads)
4856 debug_printf ("already stopped LWP %ld\n",
4857 lwpid_of (thread));
4858
4859 /* The LWP may have been stopped in an internal event that
4860 was not meant to be notified back to GDB (e.g., gdbserver
4861 breakpoint), so we should be reporting a stop event in
4862 this case too. */
4863
4864 /* If the thread already has a pending SIGSTOP, this is a
4865 no-op. Otherwise, something later will presumably resume
4866 the thread and this will cause it to cancel any pending
4867 operation, due to last_resume_kind == resume_stop. If
4868 the thread already has a pending status to report, we
4869 will still report it the next time we wait - see
4870 status_pending_p_callback. */
4871
4872 /* If we already have a pending signal to report, then
4873 there's no need to queue a SIGSTOP, as this means we're
4874 midway through moving the LWP out of the jumppad, and we
4875 will report the pending signal as soon as that is
4876 finished. */
4877 if (lwp->pending_signals_to_report == NULL)
4878 send_sigstop (lwp);
4879 }
4880
4881 /* For stop requests, we're done. */
4882 lwp->resume = NULL;
4883 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4884 return;
4885 }
4886
4887 /* If this thread which is about to be resumed has a pending status,
4888 then don't resume it - we can just report the pending status.
4889 Likewise if it is suspended, because e.g., another thread is
4890 stepping past a breakpoint. Make sure to queue any signals that
4891 would otherwise be sent. In all-stop mode, we make this decision
4892 based on whether *any* thread has a pending status. If there's a
4893 thread that needs the step-over-breakpoint dance, then don't
4894 resume any other thread but that particular one. */
4895 leave_pending = (lwp->suspended
4896 || lwp->status_pending_p
4897 || leave_all_stopped);
4898
4899 /* If we have a new signal, enqueue the signal. */
4900 if (lwp->resume->sig != 0)
4901 {
4902 siginfo_t info, *info_p;
4903
4904 /* If this is the same signal we were previously stopped by,
4905 make sure to queue its siginfo. */
4906 if (WIFSTOPPED (lwp->last_status)
4907 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4908 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4909 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4910 info_p = &info;
4911 else
4912 info_p = NULL;
4913
4914 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4915 }
4916
4917 if (!leave_pending)
4918 {
4919 if (debug_threads)
4920 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4921
4922 proceed_one_lwp (thread, NULL);
4923 }
4924 else
4925 {
4926 if (debug_threads)
4927 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4928 }
4929
4930 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4931 lwp->resume = NULL;
4932 }
4933
4934 void
4935 linux_process_target::resume (thread_resume *resume_info, size_t n)
4936 {
4937 struct thread_info *need_step_over = NULL;
4938
4939 if (debug_threads)
4940 {
4941 debug_enter ();
4942 debug_printf ("linux_resume:\n");
4943 }
4944
4945 for_each_thread ([&] (thread_info *thread)
4946 {
4947 linux_set_resume_request (thread, resume_info, n);
4948 });
4949
4950 /* If there is a thread which would otherwise be resumed, which has
4951 a pending status, then don't resume any threads - we can just
4952 report the pending status. Make sure to queue any signals that
4953 would otherwise be sent. In non-stop mode, we'll apply this
4954 logic to each thread individually. We consume all pending events
4955 before considering to start a step-over (in all-stop). */
4956 bool any_pending = false;
4957 if (!non_stop)
4958 any_pending = find_thread (resume_status_pending_p) != NULL;
4959
4960 /* If there is a thread which would otherwise be resumed, which is
4961 stopped at a breakpoint that needs stepping over, then don't
4962 resume any threads - have it step over the breakpoint with all
4963 other threads stopped, then resume all threads again. Make sure
4964 to queue any signals that would otherwise be delivered or
4965 queued. */
4966 if (!any_pending && supports_breakpoints ())
4967 need_step_over = find_thread (need_step_over_p);
4968
4969 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4970
4971 if (debug_threads)
4972 {
4973 if (need_step_over != NULL)
4974 debug_printf ("Not resuming all, need step over\n");
4975 else if (any_pending)
4976 debug_printf ("Not resuming, all-stop and found "
4977 "an LWP with pending status\n");
4978 else
4979 debug_printf ("Resuming, no pending status or step over needed\n");
4980 }
4981
4982 /* Even if we're leaving threads stopped, queue all signals we'd
4983 otherwise deliver. */
4984 for_each_thread ([&] (thread_info *thread)
4985 {
4986 linux_resume_one_thread (thread, leave_all_stopped);
4987 });
4988
4989 if (need_step_over)
4990 start_step_over (get_thread_lwp (need_step_over));
4991
4992 if (debug_threads)
4993 {
4994 debug_printf ("linux_resume done\n");
4995 debug_exit ();
4996 }
4997
4998 /* We may have events that were pending that can/should be sent to
4999 the client now. Trigger a linux_wait call. */
5000 if (target_is_async_p ())
5001 async_file_mark ();
5002 }
5003
5004 /* This function is called once per thread. We check the thread's
5005 last resume request, which will tell us whether to resume, step, or
5006 leave the thread stopped. Any signal the client requested to be
5007 delivered has already been enqueued at this point.
5008
5009 If any thread that GDB wants running is stopped at an internal
5010 breakpoint that needs stepping over, we start a step-over operation
5011 on that particular thread, and leave all others stopped. */
5012
5013 static void
5014 proceed_one_lwp (thread_info *thread, lwp_info *except)
5015 {
5016 struct lwp_info *lwp = get_thread_lwp (thread);
5017 int step;
5018
5019 if (lwp == except)
5020 return;
5021
5022 if (debug_threads)
5023 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5024
5025 if (!lwp->stopped)
5026 {
5027 if (debug_threads)
5028 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5029 return;
5030 }
5031
5032 if (thread->last_resume_kind == resume_stop
5033 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5034 {
5035 if (debug_threads)
5036 debug_printf (" client wants LWP %ld to remain stopped\n",
5037 lwpid_of (thread));
5038 return;
5039 }
5040
5041 if (lwp->status_pending_p)
5042 {
5043 if (debug_threads)
5044 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5045 lwpid_of (thread));
5046 return;
5047 }
5048
5049 gdb_assert (lwp->suspended >= 0);
5050
5051 if (lwp->suspended)
5052 {
5053 if (debug_threads)
5054 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5055 return;
5056 }
5057
5058 if (thread->last_resume_kind == resume_stop
5059 && lwp->pending_signals_to_report == NULL
5060 && (lwp->collecting_fast_tracepoint
5061 == fast_tpoint_collect_result::not_collecting))
5062 {
5063 /* We haven't reported this LWP as stopped yet (otherwise, the
5064 last_status.kind check above would catch it, and we wouldn't
5065 reach here). This LWP may have been momentarily paused by a
5066 stop_all_lwps call while handling for example, another LWP's
5067 step-over. In that case, the pending expected SIGSTOP signal
5068 that was queued at vCont;t handling time will have already
5069 been consumed by wait_for_sigstop, and so we need to requeue
5070 another one here. Note that if the LWP already has a SIGSTOP
5071 pending, this is a no-op. */
5072
5073 if (debug_threads)
5074 debug_printf ("Client wants LWP %ld to stop. "
5075 "Making sure it has a SIGSTOP pending\n",
5076 lwpid_of (thread));
5077
5078 send_sigstop (lwp);
5079 }
5080
5081 if (thread->last_resume_kind == resume_step)
5082 {
5083 if (debug_threads)
5084 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5085 lwpid_of (thread));
5086
5087 /* If resume_step is requested by GDB, install single-step
5088 breakpoints when the thread is about to be actually resumed, in
5089 case the breakpoints installed earlier were removed. */
5090 if (can_software_single_step ()
5091 && !has_single_step_breakpoints (thread))
5092 install_software_single_step_breakpoints (lwp);
5093
5094 step = maybe_hw_step (thread);
5095 }
5096 else if (lwp->bp_reinsert != 0)
5097 {
5098 if (debug_threads)
5099 debug_printf (" stepping LWP %ld, reinsert set\n",
5100 lwpid_of (thread));
5101
5102 step = maybe_hw_step (thread);
5103 }
5104 else
5105 step = 0;
5106
5107 linux_resume_one_lwp (lwp, step, 0, NULL);
5108 }
5109
5110 static void
5111 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5112 {
5113 struct lwp_info *lwp = get_thread_lwp (thread);
5114
5115 if (lwp == except)
5116 return;
5117
5118 lwp_suspended_decr (lwp);
5119
5120 proceed_one_lwp (thread, except);
5121 }
5122
5123 void
5124 linux_process_target::proceed_all_lwps ()
5125 {
5126 struct thread_info *need_step_over;
5127
5128 /* If there is a thread which would otherwise be resumed, which is
5129 stopped at a breakpoint that needs stepping over, then don't
5130 resume any threads - have it step over the breakpoint with all
5131 other threads stopped, then resume all threads again. */
5132
5133 if (supports_breakpoints ())
5134 {
5135 need_step_over = find_thread (need_step_over_p);
5136
5137 if (need_step_over != NULL)
5138 {
5139 if (debug_threads)
5140 debug_printf ("proceed_all_lwps: found "
5141 "thread %ld needing a step-over\n",
5142 lwpid_of (need_step_over));
5143
5144 start_step_over (get_thread_lwp (need_step_over));
5145 return;
5146 }
5147 }
5148
5149 if (debug_threads)
5150 debug_printf ("Proceeding, no step-over needed\n");
5151
5152 for_each_thread ([] (thread_info *thread)
5153 {
5154 proceed_one_lwp (thread, NULL);
5155 });
5156 }
5157
5158 void
5159 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5160 {
5161 if (debug_threads)
5162 {
5163 debug_enter ();
5164 if (except)
5165 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5166 lwpid_of (get_lwp_thread (except)));
5167 else
5168 debug_printf ("unstopping all lwps\n");
5169 }
5170
5171 if (unsuspend)
5172 for_each_thread ([&] (thread_info *thread)
5173 {
5174 unsuspend_and_proceed_one_lwp (thread, except);
5175 });
5176 else
5177 for_each_thread ([&] (thread_info *thread)
5178 {
5179 proceed_one_lwp (thread, except);
5180 });
5181
5182 if (debug_threads)
5183 {
5184 debug_printf ("unstop_all_lwps done\n");
5185 debug_exit ();
5186 }
5187 }
5188
5189
5190 #ifdef HAVE_LINUX_REGSETS
5191
5192 #define use_linux_regsets 1
5193
5194 /* Returns true if REGSET has been disabled. */
5195
5196 static int
5197 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5198 {
5199 return (info->disabled_regsets != NULL
5200 && info->disabled_regsets[regset - info->regsets]);
5201 }
5202
5203 /* Disable REGSET. */
5204
5205 static void
5206 disable_regset (struct regsets_info *info, struct regset_info *regset)
5207 {
5208 int dr_offset;
5209
5210 dr_offset = regset - info->regsets;
5211 if (info->disabled_regsets == NULL)
5212 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5213 info->disabled_regsets[dr_offset] = 1;
5214 }
5215
5216 static int
5217 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5218 struct regcache *regcache)
5219 {
5220 struct regset_info *regset;
5221 int saw_general_regs = 0;
5222 int pid;
5223 struct iovec iov;
5224
5225 pid = lwpid_of (current_thread);
5226 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5227 {
5228 void *buf, *data;
5229 int nt_type, res;
5230
5231 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5232 continue;
5233
5234 buf = xmalloc (regset->size);
5235
5236 nt_type = regset->nt_type;
5237 if (nt_type)
5238 {
5239 iov.iov_base = buf;
5240 iov.iov_len = regset->size;
5241 data = (void *) &iov;
5242 }
5243 else
5244 data = buf;
5245
5246 #ifndef __sparc__
5247 res = ptrace (regset->get_request, pid,
5248 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5249 #else
5250 res = ptrace (regset->get_request, pid, data, nt_type);
5251 #endif
5252 if (res < 0)
5253 {
5254 if (errno == EIO
5255 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5256 {
5257 /* If we get EIO on a regset, or an EINVAL and the regset is
5258 optional, do not try it again for this process mode. */
5259 disable_regset (regsets_info, regset);
5260 }
5261 else if (errno == ENODATA)
5262 {
5263 /* ENODATA may be returned if the regset is currently
5264 not "active". This can happen in normal operation,
5265 so suppress the warning in this case. */
5266 }
5267 else if (errno == ESRCH)
5268 {
5269 /* At this point, ESRCH should mean the process is
5270 already gone, in which case we simply ignore attempts
5271 to read its registers. */
5272 }
5273 else
5274 {
5275 char s[256];
5276 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5277 pid);
5278 perror (s);
5279 }
5280 }
5281 else
5282 {
5283 if (regset->type == GENERAL_REGS)
5284 saw_general_regs = 1;
5285 regset->store_function (regcache, buf);
5286 }
5287 free (buf);
5288 }
5289 if (saw_general_regs)
5290 return 0;
5291 else
5292 return 1;
5293 }
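/* Illustration, not part of gdbserver: when a regset has an NT_* note
   type, the transfer above goes through PTRACE_GETREGSET, which takes
   a struct iovec; the kernel truncates iov_len to what it actually
   filled in. A hedged sketch for the general-purpose set:

     #include <elf.h>
     #include <sys/ptrace.h>
     #include <sys/types.h>
     #include <sys/uio.h>

     static long
     fetch_gregs (pid_t pid, void *buf, size_t size)
     {
       struct iovec iov;

       iov.iov_base = buf;
       iov.iov_len = size;
       // On success, iov.iov_len holds the number of bytes written.
       return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
     }
*/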
5294
5295 static int
5296 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5297 struct regcache *regcache)
5298 {
5299 struct regset_info *regset;
5300 int saw_general_regs = 0;
5301 int pid;
5302 struct iovec iov;
5303
5304 pid = lwpid_of (current_thread);
5305 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5306 {
5307 void *buf, *data;
5308 int nt_type, res;
5309
5310 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5311 || regset->fill_function == NULL)
5312 continue;
5313
5314 buf = xmalloc (regset->size);
5315
5316 /* First fill the buffer with the current register set contents,
5317 in case there are any items in the kernel's regset that are
5318 not in gdbserver's regcache. */
5319
5320 nt_type = regset->nt_type;
5321 if (nt_type)
5322 {
5323 iov.iov_base = buf;
5324 iov.iov_len = regset->size;
5325 data = (void *) &iov;
5326 }
5327 else
5328 data = buf;
5329
5330 #ifndef __sparc__
5331 res = ptrace (regset->get_request, pid,
5332 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5333 #else
5334 res = ptrace (regset->get_request, pid, data, nt_type);
5335 #endif
5336
5337 if (res == 0)
5338 {
5339 /* Then overlay our cached registers on that. */
5340 regset->fill_function (regcache, buf);
5341
5342 /* Only now do we write the register set. */
5343 #ifndef __sparc__
5344 res = ptrace (regset->set_request, pid,
5345 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5346 #else
5347 res = ptrace (regset->set_request, pid, data, nt_type);
5348 #endif
5349 }
5350
5351 if (res < 0)
5352 {
5353 if (errno == EIO
5354 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5355 {
5356 /* If we get EIO on a regset, or an EINVAL and the regset is
5357 optional, do not try it again for this process mode. */
5358 disable_regset (regsets_info, regset);
5359 }
5360 else if (errno == ESRCH)
5361 {
5362 /* At this point, ESRCH should mean the process is
5363 already gone, in which case we simply ignore attempts
5364 to change its registers. See also the related
5365 comment in linux_resume_one_lwp. */
5366 free (buf);
5367 return 0;
5368 }
5369 else
5370 {
5371 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5372 }
5373 }
5374 else if (regset->type == GENERAL_REGS)
5375 saw_general_regs = 1;
5376 free (buf);
5377 }
5378 if (saw_general_regs)
5379 return 0;
5380 else
5381 return 1;
5382 }
5383
5384 #else /* !HAVE_LINUX_REGSETS */
5385
5386 #define use_linux_regsets 0
5387 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5388 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5389
5390 #endif
5391
5392 /* Return 1 if register REGNO is supported by one of the regset ptrace
5393 calls or 0 if it has to be transferred individually. */
5394
5395 static int
5396 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5397 {
5398 unsigned char mask = 1 << (regno % 8);
5399 size_t index = regno / 8;
5400
5401 return (use_linux_regsets
5402 && (regs_info->regset_bitmap == NULL
5403 || (regs_info->regset_bitmap[index] & mask) != 0));
5404 }
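/* Illustration, not part of gdbserver: the bitmap test above packs one
   bit per register, eight per byte. For example, regno 10 lives in
   byte 1 under mask 1 << 2. The same test in isolation:

     static int
     bit_set_p (const unsigned char *bitmap, int regno)
     {
       return (bitmap[regno / 8] & (1 << (regno % 8))) != 0;
     }
*/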
5405
5406 #ifdef HAVE_LINUX_USRREGS
5407
5408 static int
5409 register_addr (const struct usrregs_info *usrregs, int regnum)
5410 {
5411 int addr;
5412
5413 if (regnum < 0 || regnum >= usrregs->num_regs)
5414 error ("Invalid register number %d.", regnum);
5415
5416 addr = usrregs->regmap[regnum];
5417
5418 return addr;
5419 }
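/* Illustration, not part of gdbserver: on USRREGS targets each
   register lives at a fixed byte offset in the kernel's USER area and
   is transferred one word at a time. A hedged sketch (the helper name
   is hypothetical; callers must check errno, since -1 is also a valid
   register value):

     #include <errno.h>
     #include <sys/ptrace.h>
     #include <sys/types.h>

     static long
     peek_user_word (pid_t pid, long offset)
     {
       errno = 0;
       return ptrace (PTRACE_PEEKUSER, pid, (void *) offset, (void *) 0);
     }
*/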
5420
5421
5422 void
5423 linux_process_target::fetch_register (const usrregs_info *usrregs,
5424 regcache *regcache, int regno)
5425 {
5426 CORE_ADDR regaddr;
5427 int i, size;
5428 char *buf;
5429 int pid;
5430
5431 if (regno >= usrregs->num_regs)
5432 return;
5433 if (low_cannot_fetch_register (regno))
5434 return;
5435
5436 regaddr = register_addr (usrregs, regno);
5437 if (regaddr == -1)
5438 return;
5439
5440 size = ((register_size (regcache->tdesc, regno)
5441 + sizeof (PTRACE_XFER_TYPE) - 1)
5442 & -sizeof (PTRACE_XFER_TYPE));
5443 buf = (char *) alloca (size);
5444
5445 pid = lwpid_of (current_thread);
5446 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5447 {
5448 errno = 0;
5449 *(PTRACE_XFER_TYPE *) (buf + i) =
5450 ptrace (PTRACE_PEEKUSER, pid,
5451 /* Coerce to a uintptr_t first to avoid potential gcc warning
5452 of coercing an 8 byte integer to a 4 byte pointer. */
5453 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5454 regaddr += sizeof (PTRACE_XFER_TYPE);
5455 if (errno != 0)
5456 {
5457 /* Mark register REGNO unavailable. */
5458 supply_register (regcache, regno, NULL);
5459 return;
5460 }
5461 }
5462
5463 if (the_low_target.supply_ptrace_register)
5464 the_low_target.supply_ptrace_register (regcache, regno, buf);
5465 else
5466 supply_register (regcache, regno, buf);
5467 }
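/* Illustration, not part of gdbserver: the size computation in
   fetch_register (and store_register below) rounds the register size
   up to a whole number of ptrace transfer words using the identity
   (n + a - 1) & -a, valid when a is a power of two. For example, with
   8-byte words a 4-byte register yields (4 + 7) & -8 = 8, so exactly
   one PTRACE_PEEKUSER transfer covers it. In isolation:

     static size_t
     round_up_pow2 (size_t n, size_t a)   // A must be a power of two.
     {
       return (n + a - 1) & -a;
     }
*/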
5468
5469 void
5470 linux_process_target::store_register (const usrregs_info *usrregs,
5471 regcache *regcache, int regno)
5472 {
5473 CORE_ADDR regaddr;
5474 int i, size;
5475 char *buf;
5476 int pid;
5477
5478 if (regno >= usrregs->num_regs)
5479 return;
5480 if (low_cannot_store_register (regno))
5481 return;
5482
5483 regaddr = register_addr (usrregs, regno);
5484 if (regaddr == -1)
5485 return;
5486
5487 size = ((register_size (regcache->tdesc, regno)
5488 + sizeof (PTRACE_XFER_TYPE) - 1)
5489 & -sizeof (PTRACE_XFER_TYPE));
5490 buf = (char *) alloca (size);
5491 memset (buf, 0, size);
5492
5493 if (the_low_target.collect_ptrace_register)
5494 the_low_target.collect_ptrace_register (regcache, regno, buf);
5495 else
5496 collect_register (regcache, regno, buf);
5497
5498 pid = lwpid_of (current_thread);
5499 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5500 {
5501 errno = 0;
5502 ptrace (PTRACE_POKEUSER, pid,
5503 /* Coerce to a uintptr_t first to avoid potential gcc warning
5504 about coercing an 8 byte integer to a 4 byte pointer. */
5505 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5506 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5507 if (errno != 0)
5508 {
5509 /* At this point, ESRCH should mean the process is
5510 already gone, in which case we simply ignore attempts
5511 to change its registers. See also the related
5512 comment in linux_resume_one_lwp. */
5513 if (errno == ESRCH)
5514 return;
5515
5517 if (!low_cannot_store_register (regno))
5518 error ("writing register %d: %s", regno, safe_strerror (errno));
5519 }
5520 regaddr += sizeof (PTRACE_XFER_TYPE);
5521 }
5522 }
5523 #endif /* HAVE_LINUX_USRREGS */
5524
5525 void
5526 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5527 regcache *regcache,
5528 int regno, int all)
5529 {
5530 #ifdef HAVE_LINUX_USRREGS
5531 struct usrregs_info *usr = regs_info->usrregs;
5532
5533 if (regno == -1)
5534 {
5535 for (regno = 0; regno < usr->num_regs; regno++)
5536 if (all || !linux_register_in_regsets (regs_info, regno))
5537 fetch_register (usr, regcache, regno);
5538 }
5539 else
5540 fetch_register (usr, regcache, regno);
5541 #endif
5542 }
5543
5544 void
5545 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5546 regcache *regcache,
5547 int regno, int all)
5548 {
5549 #ifdef HAVE_LINUX_USRREGS
5550 struct usrregs_info *usr = regs_info->usrregs;
5551
5552 if (regno == -1)
5553 {
5554 for (regno = 0; regno < usr->num_regs; regno++)
5555 if (all || !linux_register_in_regsets (regs_info, regno))
5556 store_register (usr, regcache, regno);
5557 }
5558 else
5559 store_register (usr, regcache, regno);
5560 #endif
5561 }
5562
5563 void
5564 linux_process_target::fetch_registers (regcache *regcache, int regno)
5565 {
5566 int use_regsets;
5567 int all = 0;
5568 const regs_info *regs_info = get_regs_info ();
5569
5570 if (regno == -1)
5571 {
5572 if (regs_info->usrregs != NULL)
5573 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5574 low_fetch_register (regcache, regno);
5575
5576 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5577 if (regs_info->usrregs != NULL)
5578 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5579 }
5580 else
5581 {
5582 if (low_fetch_register (regcache, regno))
5583 return;
5584
5585 use_regsets = linux_register_in_regsets (regs_info, regno);
5586 if (use_regsets)
5587 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5588 regcache);
5589 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5590 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5591 }
5592 }
5593
5594 void
5595 linux_process_target::store_registers (regcache *regcache, int regno)
5596 {
5597 int use_regsets;
5598 int all = 0;
5599 const regs_info *regs_info = get_regs_info ();
5600
5601 if (regno == -1)
5602 {
5603 all = regsets_store_inferior_registers (regs_info->regsets_info,
5604 regcache);
5605 if (regs_info->usrregs != NULL)
5606 usr_store_inferior_registers (regs_info, regcache, regno, all);
5607 }
5608 else
5609 {
5610 use_regsets = linux_register_in_regsets (regs_info, regno);
5611 if (use_regsets)
5612 all = regsets_store_inferior_registers (regs_info->regsets_info,
5613 regcache);
5614 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5615 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5616 }
5617 }
5618
5619 bool
5620 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5621 {
5622 return false;
5623 }
5624
5625 /* A wrapper for the read_memory target op. */
5626
5627 static int
5628 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5629 {
5630 return the_target->read_memory (memaddr, myaddr, len);
5631 }
5632
5633 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5634 to debugger memory starting at MYADDR. */
5635
5636 int
5637 linux_process_target::read_memory (CORE_ADDR memaddr,
5638 unsigned char *myaddr, int len)
5639 {
5640 int pid = lwpid_of (current_thread);
5641 PTRACE_XFER_TYPE *buffer;
5642 CORE_ADDR addr;
5643 int count;
5644 char filename[64];
5645 int i;
5646 int ret;
5647 int fd;
5648
5649 /* Try using /proc. Don't bother for reads shorter than three words. */
5650 if (len >= 3 * sizeof (long))
5651 {
5652 int bytes;
5653
5654 /* We could keep this file open and cache it - possibly one per
5655 thread. That requires some juggling, but is even faster. */
5656 sprintf (filename, "/proc/%d/mem", pid);
5657 fd = open (filename, O_RDONLY | O_LARGEFILE);
5658 if (fd == -1)
5659 goto no_proc;
5660
5661 /* If pread64 is available, use it. It's faster if the kernel
5662 supports it (only one syscall), and it's 64-bit safe even on
5663 32-bit platforms (for instance, SPARC debugging a SPARC64
5664 application). */
5665 #ifdef HAVE_PREAD64
5666 bytes = pread64 (fd, myaddr, len, memaddr);
5667 #else
5668 bytes = -1;
5669 if (lseek (fd, memaddr, SEEK_SET) != -1)
5670 bytes = read (fd, myaddr, len);
5671 #endif
5672
5673 close (fd);
5674 if (bytes == len)
5675 return 0;
5676
5677 /* Some data was read; we'll try to get the rest with ptrace. */
5678 if (bytes > 0)
5679 {
5680 memaddr += bytes;
5681 myaddr += bytes;
5682 len -= bytes;
5683 }
5684 }
5685
5686 no_proc:
5687 /* Round starting address down to longword boundary. */
5688 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5689 /* Round ending address up; get number of longwords that makes. */
5690 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5691 / sizeof (PTRACE_XFER_TYPE));
5692 /* Allocate buffer of that many longwords. */
5693 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5694
5695 /* Read all the longwords. */
5696 errno = 0;
5697 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5698 {
5699 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5700 about coercing an 8 byte integer to a 4 byte pointer. */
5701 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5702 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5703 (PTRACE_TYPE_ARG4) 0);
5704 if (errno)
5705 break;
5706 }
5707 ret = errno;
5708
5709 /* Copy appropriate bytes out of the buffer. */
5710 if (i > 0)
5711 {
5712 i *= sizeof (PTRACE_XFER_TYPE);
5713 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5714 memcpy (myaddr,
5715 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5716 i < len ? i : len);
5717 }
5718
5719 return ret;
5720 }
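
/* An illustrative, standalone sketch (not part of gdbserver; the
   helper name is invented) of the /proc path used above: open
   /proc/PID/mem and pread the bytes directly, one syscall per chunk,
   which also stays 64-bit safe on 32-bit hosts.  Error handling is
   minimal; for exposition only.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read LEN bytes at ADDR from process PID (which must be stopped
   under ptrace for the read to be permitted).  Returns the number of
   bytes read, or -1 on failure.  */
static ssize_t
proc_mem_read (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  snprintf (filename, sizeof filename, "/proc/%d/mem", pid);

  int fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  ssize_t n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif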
5721
5722 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5723 memory at MEMADDR. On failure (cannot write to the inferior)
5724 returns the value of errno. Always succeeds if LEN is zero. */
5725
5726 int
5727 linux_process_target::write_memory (CORE_ADDR memaddr,
5728 const unsigned char *myaddr, int len)
5729 {
5730 int i;
5731 /* Round starting address down to longword boundary. */
5732 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5733 /* Round ending address up; get number of longwords that makes. */
5734 int count
5735 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5736 / sizeof (PTRACE_XFER_TYPE);
5737
5738 /* Allocate buffer of that many longwords. */
5739 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5740
5741 int pid = lwpid_of (current_thread);
5742
5743 if (len == 0)
5744 {
5745 /* Zero length write always succeeds. */
5746 return 0;
5747 }
5748
5749 if (debug_threads)
5750 {
5751 /* Dump up to four bytes. */
5752 char str[4 * 2 + 1];
5753 char *p = str;
5754 int dump = len < 4 ? len : 4;
5755
5756 for (i = 0; i < dump; i++)
5757 {
5758 sprintf (p, "%02x", myaddr[i]);
5759 p += 2;
5760 }
5761 *p = '\0';
5762
5763 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5764 str, (long) memaddr, pid);
5765 }
5766
5767 /* Fill start and end extra bytes of buffer with existing memory data. */
5768
5769 errno = 0;
5770 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5771 about coercing an 8 byte integer to a 4 byte pointer. */
5772 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5773 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5774 (PTRACE_TYPE_ARG4) 0);
5775 if (errno)
5776 return errno;
5777
5778 if (count > 1)
5779 {
5780 errno = 0;
5781 buffer[count - 1]
5782 = ptrace (PTRACE_PEEKTEXT, pid,
5783 /* Coerce to a uintptr_t first to avoid potential gcc warning
5784 about coercing an 8 byte integer to a 4 byte pointer. */
5785 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5786 * sizeof (PTRACE_XFER_TYPE)),
5787 (PTRACE_TYPE_ARG4) 0);
5788 if (errno)
5789 return errno;
5790 }
5791
5792 /* Copy data to be written over corresponding part of buffer. */
5793
5794 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5795 myaddr, len);
5796
5797 /* Write the entire buffer. */
5798
5799 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5800 {
5801 errno = 0;
5802 ptrace (PTRACE_POKETEXT, pid,
5803 /* Coerce to a uintptr_t first to avoid potential gcc warning
5804 about coercing an 8 byte integer to a 4 byte pointer. */
5805 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5806 (PTRACE_TYPE_ARG4) buffer[i]);
5807 if (errno)
5808 return errno;
5809 }
5810
5811 return 0;
5812 }
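
/* An illustrative, standalone sketch (not part of gdbserver; the
   helper name is invented) of the read-modify-write done above:
   ptrace pokes only whole aligned words, so a partial word is peeked
   first and the bytes outside the write are preserved.  Assumes
   OFFSET + LEN <= sizeof (long); for exposition only.  */
#if 0
#include <errno.h>
#include <string.h>
#include <sys/ptrace.h>

/* Overwrite LEN bytes inside the aligned word at WORDADDR of process
   PID, starting at byte OFFSET within the word.  */
static int
poke_partial_word (int pid, unsigned long wordaddr, int offset,
                   const unsigned char *bytes, int len)
{
  errno = 0;
  long word = ptrace (PTRACE_PEEKTEXT, pid, (void *) wordaddr, 0);
  if (errno != 0)
    return -1;

  memcpy ((char *) &word + offset, bytes, len);
  if (ptrace (PTRACE_POKETEXT, pid, (void *) wordaddr,
              (void *) word) != 0)
    return -1;
  return 0;
}
#endif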
5813
5814 void
5815 linux_process_target::look_up_symbols ()
5816 {
5817 #ifdef USE_THREAD_DB
5818 struct process_info *proc = current_process ();
5819
5820 if (proc->priv->thread_db != NULL)
5821 return;
5822
5823 thread_db_init ();
5824 #endif
5825 }
5826
5827 void
5828 linux_process_target::request_interrupt ()
5829 {
5830 /* Send a SIGINT to the process group. This acts just like the user
5831 typed a ^C on the controlling terminal. */
5832 ::kill (-signal_pid, SIGINT);
5833 }
5834
5835 bool
5836 linux_process_target::supports_read_auxv ()
5837 {
5838 return true;
5839 }
5840
5841 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5842 to debugger memory starting at MYADDR. */
5843
5844 int
5845 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5846 unsigned int len)
5847 {
5848 char filename[PATH_MAX];
5849 int fd, n;
5850 int pid = lwpid_of (current_thread);
5851
5852 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5853
5854 fd = open (filename, O_RDONLY);
5855 if (fd < 0)
5856 return -1;
5857
5858 if (offset != (CORE_ADDR) 0
5859 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5860 n = -1;
5861 else
5862 n = read (fd, myaddr, len);
5863
5864 close (fd);
5865
5866 return n;
5867 }
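
/* An illustrative, standalone sketch (not part of gdbserver): the
   auxv file read above is a flat array of (a_type, a_val) pairs, so
   it can be scanned directly.  This dumps the current process's own
   vector and assumes a 64-bit process; for exposition only.  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  Elf64_auxv_t aux;

  int fd = open ("/proc/self/auxv", O_RDONLY);
  if (fd < 0)
    return 1;

  while (read (fd, &aux, sizeof aux) == sizeof aux
         && aux.a_type != AT_NULL)
    printf ("type %-4llu value 0x%llx\n",
            (unsigned long long) aux.a_type,
            (unsigned long long) aux.a_un.a_val);

  close (fd);
  return 0;
}
#endif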
5868
5869 /* These breakpoint- and watchpoint-related wrapper functions simply
5870 pass on the function call if the target has registered a
5871 corresponding function. */
5872
5873 bool
5874 linux_process_target::supports_z_point_type (char z_type)
5875 {
5876 return (the_low_target.supports_z_point_type != NULL
5877 && the_low_target.supports_z_point_type (z_type));
5878 }
5879
5880 int
5881 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5882 int size, raw_breakpoint *bp)
5883 {
5884 if (type == raw_bkpt_type_sw)
5885 return insert_memory_breakpoint (bp);
5886 else if (the_low_target.insert_point != NULL)
5887 return the_low_target.insert_point (type, addr, size, bp);
5888 else
5889 /* Unsupported (see target.h). */
5890 return 1;
5891 }
5892
5893 int
5894 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5895 int size, raw_breakpoint *bp)
5896 {
5897 if (type == raw_bkpt_type_sw)
5898 return remove_memory_breakpoint (bp);
5899 else if (the_low_target.remove_point != NULL)
5900 return the_low_target.remove_point (type, addr, size, bp);
5901 else
5902 /* Unsupported (see target.h). */
5903 return 1;
5904 }
5905
5906 /* Implement the stopped_by_sw_breakpoint target_ops
5907 method. */
5908
5909 bool
5910 linux_process_target::stopped_by_sw_breakpoint ()
5911 {
5912 struct lwp_info *lwp = get_thread_lwp (current_thread);
5913
5914 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5915 }
5916
5917 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5918 method. */
5919
5920 bool
5921 linux_process_target::supports_stopped_by_sw_breakpoint ()
5922 {
5923 return USE_SIGTRAP_SIGINFO;
5924 }
5925
5926 /* Implement the stopped_by_hw_breakpoint target_ops
5927 method. */
5928
5929 bool
5930 linux_process_target::stopped_by_hw_breakpoint ()
5931 {
5932 struct lwp_info *lwp = get_thread_lwp (current_thread);
5933
5934 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5935 }
5936
5937 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5938 method. */
5939
5940 bool
5941 linux_process_target::supports_stopped_by_hw_breakpoint ()
5942 {
5943 return USE_SIGTRAP_SIGINFO;
5944 }
5945
5946 /* Implement the supports_hardware_single_step target_ops method. */
5947
5948 bool
5949 linux_process_target::supports_hardware_single_step ()
5950 {
5951 return can_hardware_single_step ();
5952 }
5953
5954 bool
5955 linux_process_target::supports_software_single_step ()
5956 {
5957 return can_software_single_step ();
5958 }
5959
5960 bool
5961 linux_process_target::stopped_by_watchpoint ()
5962 {
5963 struct lwp_info *lwp = get_thread_lwp (current_thread);
5964
5965 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5966 }
5967
5968 CORE_ADDR
5969 linux_process_target::stopped_data_address ()
5970 {
5971 struct lwp_info *lwp = get_thread_lwp (current_thread);
5972
5973 return lwp->stopped_data_address;
5974 }
5975
5976 /* This is only used for targets that define PT_TEXT_ADDR,
5977 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5978 target presumably has other ways of acquiring this information,
5979 such as loadmaps. */
5980
5981 bool
5982 linux_process_target::supports_read_offsets ()
5983 {
5984 #ifdef SUPPORTS_READ_OFFSETS
5985 return true;
5986 #else
5987 return false;
5988 #endif
5989 }
5990
5991 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5992 to tell gdb about. */
5993
5994 int
5995 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5996 {
5997 #ifdef SUPPORTS_READ_OFFSETS
5998 unsigned long text, text_end, data;
5999 int pid = lwpid_of (current_thread);
6000
6001 errno = 0;
6002
6003 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6004 (PTRACE_TYPE_ARG4) 0);
6005 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6006 (PTRACE_TYPE_ARG4) 0);
6007 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6008 (PTRACE_TYPE_ARG4) 0);
6009
6010 if (errno == 0)
6011 {
6012 /* Both text and data offsets produced at compile-time (and so
6013 used by gdb) are relative to the beginning of the program,
6014 with the data segment immediately following the text segment.
6015 However, the actual runtime layout in memory may put the data
6016 somewhere else, so when we send gdb a data base-address, we
6017 use the real data base address and subtract the compile-time
6018 data base-address from it (which is just the length of the
6019 text segment). BSS immediately follows data in both
6020 cases. */
6021 *text_p = text;
6022 *data_p = data - (text_end - text);
6023
6024 return 1;
6025 }
6026 return 0;
6027 #else
6028 gdb_assert_not_reached ("target op read_offsets not supported");
6029 #endif
6030 }
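
/* A worked example of the computation above (all numbers invented):
   suppose the program was linked with text starting at 0 and 0x4000
   bytes long, so the compile-time data segment begins at 0x4000.  At
   run time the kernel maps text at 0x10000000 and data at 0x20000000.
   Then:

     *text_p = 0x10000000
     *data_p = 0x20000000 - (text_end - text)
             = 0x20000000 - 0x4000
             = 0x1fffc000

   and relocating the compile-time data start by *data_p gives
   0x4000 + 0x1fffc000 = 0x20000000, the real data base address, as
   intended.  */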
6031
6032 bool
6033 linux_process_target::supports_get_tls_address ()
6034 {
6035 #ifdef USE_THREAD_DB
6036 return true;
6037 #else
6038 return false;
6039 #endif
6040 }
6041
6042 int
6043 linux_process_target::get_tls_address (thread_info *thread,
6044 CORE_ADDR offset,
6045 CORE_ADDR load_module,
6046 CORE_ADDR *address)
6047 {
6048 #ifdef USE_THREAD_DB
6049 return thread_db_get_tls_address (thread, offset, load_module, address);
6050 #else
6051 return -1;
6052 #endif
6053 }
6054
6055 bool
6056 linux_process_target::supports_qxfer_osdata ()
6057 {
6058 return true;
6059 }
6060
6061 int
6062 linux_process_target::qxfer_osdata (const char *annex,
6063 unsigned char *readbuf,
6064 unsigned const char *writebuf,
6065 CORE_ADDR offset, int len)
6066 {
6067 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6068 }
6069
6070 /* Convert a native/host siginfo object, into/from the siginfo in the
6071 layout of the inferiors' architecture. */
6072
6073 static void
6074 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6075 {
6076 int done = 0;
6077
6078 if (the_low_target.siginfo_fixup != NULL)
6079 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6080
6081 /* If there was no callback, or the callback didn't do anything,
6082 then just do a straight memcpy. */
6083 if (!done)
6084 {
6085 if (direction == 1)
6086 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6087 else
6088 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6089 }
6090 }
6091
6092 bool
6093 linux_process_target::supports_qxfer_siginfo ()
6094 {
6095 return true;
6096 }
6097
6098 int
6099 linux_process_target::qxfer_siginfo (const char *annex,
6100 unsigned char *readbuf,
6101 unsigned const char *writebuf,
6102 CORE_ADDR offset, int len)
6103 {
6104 int pid;
6105 siginfo_t siginfo;
6106 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6107
6108 if (current_thread == NULL)
6109 return -1;
6110
6111 pid = lwpid_of (current_thread);
6112
6113 if (debug_threads)
6114 debug_printf ("%s siginfo for lwp %d.\n",
6115 readbuf != NULL ? "Reading" : "Writing",
6116 pid);
6117
6118 if (offset >= sizeof (siginfo))
6119 return -1;
6120
6121 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6122 return -1;
6123
6124 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6125 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6126 inferior with a 64-bit GDBSERVER should look the same as debugging it
6127 with a 32-bit GDBSERVER, we need to convert it. */
6128 siginfo_fixup (&siginfo, inf_siginfo, 0);
6129
6130 if (offset + len > sizeof (siginfo))
6131 len = sizeof (siginfo) - offset;
6132
6133 if (readbuf != NULL)
6134 memcpy (readbuf, inf_siginfo + offset, len);
6135 else
6136 {
6137 memcpy (inf_siginfo + offset, writebuf, len);
6138
6139 /* Convert back to ptrace layout before flushing it out. */
6140 siginfo_fixup (&siginfo, inf_siginfo, 1);
6141
6142 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6143 return -1;
6144 }
6145
6146 return len;
6147 }
6148
6149 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6150 it lets us notice when children change state, and it acts as the
6151 handler for the sigsuspend in my_waitpid. */
6152
6153 static void
6154 sigchld_handler (int signo)
6155 {
6156 int old_errno = errno;
6157
6158 if (debug_threads)
6159 {
6160 do
6161 {
6162 /* Use the async signal safe debug function. */
6163 if (debug_write ("sigchld_handler\n",
6164 sizeof ("sigchld_handler\n") - 1) < 0)
6165 break; /* just ignore */
6166 } while (0);
6167 }
6168
6169 if (target_is_async_p ())
6170 async_file_mark (); /* trigger a linux_wait */
6171
6172 errno = old_errno;
6173 }
6174
6175 bool
6176 linux_process_target::supports_non_stop ()
6177 {
6178 return true;
6179 }
6180
6181 bool
6182 linux_process_target::async (bool enable)
6183 {
6184 bool previous = target_is_async_p ();
6185
6186 if (debug_threads)
6187 debug_printf ("linux_async (%d), previous=%d\n",
6188 enable, previous);
6189
6190 if (previous != enable)
6191 {
6192 sigset_t mask;
6193 sigemptyset (&mask);
6194 sigaddset (&mask, SIGCHLD);
6195
6196 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6197
6198 if (enable)
6199 {
6200 if (pipe (linux_event_pipe) == -1)
6201 {
6202 linux_event_pipe[0] = -1;
6203 linux_event_pipe[1] = -1;
6204 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6205
6206 warning ("creating event pipe failed.");
6207 return previous;
6208 }
6209
6210 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6211 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6212
6213 /* Register the event loop handler. */
6214 add_file_handler (linux_event_pipe[0],
6215 handle_target_event, NULL);
6216
6217 /* Always trigger a linux_wait. */
6218 async_file_mark ();
6219 }
6220 else
6221 {
6222 delete_file_handler (linux_event_pipe[0]);
6223
6224 close (linux_event_pipe[0]);
6225 close (linux_event_pipe[1]);
6226 linux_event_pipe[0] = -1;
6227 linux_event_pipe[1] = -1;
6228 }
6229
6230 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6231 }
6232
6233 return previous;
6234 }
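
/* An illustrative, standalone sketch (not part of gdbserver; names
   are invented) of the self-pipe pattern set up above: a signal
   handler may only call async-signal-safe functions, so it just
   writes a byte into a non-blocking pipe, and the event loop watches
   the read end to learn that SIGCHLD arrived.  For exposition
   only.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

static void
mark_event (int signo)
{
  int old_errno = errno;
  char byte = '+';

  /* write is async-signal-safe; if the pipe is full, a byte is
     already pending and the event loop will wake up anyway.  */
  (void) write (event_pipe[1], &byte, 1);
  errno = old_errno;
}

static int
setup_sigchld_pipe (void)
{
  if (pipe (event_pipe) == -1)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  struct sigaction sa = {};
  sa.sa_handler = mark_event;
  sa.sa_flags = SA_RESTART;
  return sigaction (SIGCHLD, &sa, NULL);
}
#endif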
6235
6236 int
6237 linux_process_target::start_non_stop (bool nonstop)
6238 {
6239 /* Register or unregister from event-loop accordingly. */
6240 target_async (nonstop);
6241
6242 if (target_is_async_p () != (nonstop != false))
6243 return -1;
6244
6245 return 0;
6246 }
6247
6248 bool
6249 linux_process_target::supports_multi_process ()
6250 {
6251 return true;
6252 }
6253
6254 /* Check if fork events are supported. */
6255
6256 bool
6257 linux_process_target::supports_fork_events ()
6258 {
6259 return linux_supports_tracefork ();
6260 }
6261
6262 /* Check if vfork events are supported. */
6263
6264 bool
6265 linux_process_target::supports_vfork_events ()
6266 {
6267 return linux_supports_tracefork ();
6268 }
6269
6270 /* Check if exec events are supported. */
6271
6272 bool
6273 linux_process_target::supports_exec_events ()
6274 {
6275 return linux_supports_traceexec ();
6276 }
6277
6278 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6279 ptrace flags for all inferiors. This is in case the new GDB connection
6280 doesn't support the same set of events that the previous one did. */
6281
6282 void
6283 linux_process_target::handle_new_gdb_connection ()
6284 {
6285 /* Request that all the lwps reset their ptrace options. */
6286 for_each_thread ([] (thread_info *thread)
6287 {
6288 struct lwp_info *lwp = get_thread_lwp (thread);
6289
6290 if (!lwp->stopped)
6291 {
6292 /* Stop the lwp so we can modify its ptrace options. */
6293 lwp->must_set_ptrace_flags = 1;
6294 linux_stop_lwp (lwp);
6295 }
6296 else
6297 {
6298 /* Already stopped; go ahead and set the ptrace options. */
6299 struct process_info *proc = find_process_pid (pid_of (thread));
6300 int options = linux_low_ptrace_options (proc->attached);
6301
6302 linux_enable_event_reporting (lwpid_of (thread), options);
6303 lwp->must_set_ptrace_flags = 0;
6304 }
6305 });
6306 }
6307
6308 int
6309 linux_process_target::handle_monitor_command (char *mon)
6310 {
6311 #ifdef USE_THREAD_DB
6312 return thread_db_handle_monitor_command (mon);
6313 #else
6314 return 0;
6315 #endif
6316 }
6317
6318 int
6319 linux_process_target::core_of_thread (ptid_t ptid)
6320 {
6321 return linux_common_core_of_thread (ptid);
6322 }
6323
6324 bool
6325 linux_process_target::supports_disable_randomization ()
6326 {
6327 #ifdef HAVE_PERSONALITY
6328 return true;
6329 #else
6330 return false;
6331 #endif
6332 }
6333
6334 bool
6335 linux_process_target::supports_agent ()
6336 {
6337 return true;
6338 }
6339
6340 bool
6341 linux_process_target::supports_range_stepping ()
6342 {
6343 if (can_software_single_step ())
6344 return true;
6345 if (the_low_target.supports_range_stepping == NULL)
6346 return false;
6347
6348 return (*the_low_target.supports_range_stepping) ();
6349 }
6350
6351 bool
6352 linux_process_target::supports_pid_to_exec_file ()
6353 {
6354 return true;
6355 }
6356
6357 char *
6358 linux_process_target::pid_to_exec_file (int pid)
6359 {
6360 return linux_proc_pid_to_exec_file (pid);
6361 }
6362
6363 bool
6364 linux_process_target::supports_multifs ()
6365 {
6366 return true;
6367 }
6368
6369 int
6370 linux_process_target::multifs_open (int pid, const char *filename,
6371 int flags, mode_t mode)
6372 {
6373 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6374 }
6375
6376 int
6377 linux_process_target::multifs_unlink (int pid, const char *filename)
6378 {
6379 return linux_mntns_unlink (pid, filename);
6380 }
6381
6382 ssize_t
6383 linux_process_target::multifs_readlink (int pid, const char *filename,
6384 char *buf, size_t bufsiz)
6385 {
6386 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6387 }
6388
6389 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6390 struct target_loadseg
6391 {
6392 /* Core address to which the segment is mapped. */
6393 Elf32_Addr addr;
6394 /* VMA recorded in the program header. */
6395 Elf32_Addr p_vaddr;
6396 /* Size of this segment in memory. */
6397 Elf32_Word p_memsz;
6398 };
6399
6400 # if defined PT_GETDSBT
6401 struct target_loadmap
6402 {
6403 /* Protocol version number, must be zero. */
6404 Elf32_Word version;
6405 /* Pointer to the DSBT table, its size, and the DSBT index. */
6406 unsigned *dsbt_table;
6407 unsigned dsbt_size, dsbt_index;
6408 /* Number of segments in this map. */
6409 Elf32_Word nsegs;
6410 /* The actual memory map. */
6411 struct target_loadseg segs[/*nsegs*/];
6412 };
6413 # define LINUX_LOADMAP PT_GETDSBT
6414 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6415 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6416 # else
6417 struct target_loadmap
6418 {
6419 /* Protocol version number, must be zero. */
6420 Elf32_Half version;
6421 /* Number of segments in this map. */
6422 Elf32_Half nsegs;
6423 /* The actual memory map. */
6424 struct target_loadseg segs[/*nsegs*/];
6425 };
6426 # define LINUX_LOADMAP PTRACE_GETFDPIC
6427 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6428 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6429 # endif
6430
6431 bool
6432 linux_process_target::supports_read_loadmap ()
6433 {
6434 return true;
6435 }
6436
6437 int
6438 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6439 unsigned char *myaddr, unsigned int len)
6440 {
6441 int pid = lwpid_of (current_thread);
6442 int addr = -1;
6443 struct target_loadmap *data = NULL;
6444 unsigned int actual_length, copy_length;
6445
6446 if (strcmp (annex, "exec") == 0)
6447 addr = (int) LINUX_LOADMAP_EXEC;
6448 else if (strcmp (annex, "interp") == 0)
6449 addr = (int) LINUX_LOADMAP_INTERP;
6450 else
6451 return -1;
6452
6453 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6454 return -1;
6455
6456 if (data == NULL)
6457 return -1;
6458
6459 actual_length = sizeof (struct target_loadmap)
6460 + sizeof (struct target_loadseg) * data->nsegs;
6461
6462 if (offset < 0 || offset > actual_length)
6463 return -1;
6464
6465 copy_length = actual_length - offset < len ? actual_length - offset : len;
6466 memcpy (myaddr, (char *) data + offset, copy_length);
6467 return copy_length;
6468 }
6469 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6470
6471 void
6472 linux_process_target::process_qsupported (char **features, int count)
6473 {
6474 if (the_low_target.process_qsupported != NULL)
6475 the_low_target.process_qsupported (features, count);
6476 }
6477
6478 bool
6479 linux_process_target::supports_catch_syscall ()
6480 {
6481 return (the_low_target.get_syscall_trapinfo != NULL
6482 && linux_supports_tracesysgood ());
6483 }
6484
6485 int
6486 linux_process_target::get_ipa_tdesc_idx ()
6487 {
6488 if (the_low_target.get_ipa_tdesc_idx == NULL)
6489 return 0;
6490
6491 return (*the_low_target.get_ipa_tdesc_idx) ();
6492 }
6493
6494 bool
6495 linux_process_target::supports_tracepoints ()
6496 {
6497 if (the_low_target.supports_tracepoints == NULL)
6498 return false;
6499
6500 return (*the_low_target.supports_tracepoints) ();
6501 }
6502
6503 CORE_ADDR
6504 linux_process_target::read_pc (regcache *regcache)
6505 {
6506 if (the_low_target.get_pc == NULL)
6507 return 0;
6508
6509 return (*the_low_target.get_pc) (regcache);
6510 }
6511
6512 void
6513 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6514 {
6515 gdb_assert (the_low_target.set_pc != NULL);
6516
6517 (*the_low_target.set_pc) (regcache, pc);
6518 }
6519
6520 bool
6521 linux_process_target::supports_thread_stopped ()
6522 {
6523 return true;
6524 }
6525
6526 bool
6527 linux_process_target::thread_stopped (thread_info *thread)
6528 {
6529 return get_thread_lwp (thread)->stopped;
6530 }
6531
6532 /* This exposes stop-all-threads functionality to other modules. */
6533
6534 void
6535 linux_process_target::pause_all (bool freeze)
6536 {
6537 stop_all_lwps (freeze, NULL);
6538 }
6539
6540 /* This exposes unstop-all-threads functionality to other gdbserver
6541 modules. */
6542
6543 void
6544 linux_process_target::unpause_all (bool unfreeze)
6545 {
6546 unstop_all_lwps (unfreeze, NULL);
6547 }
6548
6549 int
6550 linux_process_target::prepare_to_access_memory ()
6551 {
6552 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6553 running LWP. */
6554 if (non_stop)
6555 target_pause_all (true);
6556 return 0;
6557 }
6558
6559 void
6560 linux_process_target::done_accessing_memory ()
6561 {
6562 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6563 running LWP. */
6564 if (non_stop)
6565 target_unpause_all (true);
6566 }
6567
6568 bool
6569 linux_process_target::supports_fast_tracepoints ()
6570 {
6571 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6572 }
6573
6574 int
6575 linux_process_target::install_fast_tracepoint_jump_pad
6576 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6577 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6578 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6579 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6580 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6581 char *err)
6582 {
6583 return (*the_low_target.install_fast_tracepoint_jump_pad)
6584 (tpoint, tpaddr, collector, lockaddr, orig_size,
6585 jump_entry, trampoline, trampoline_size,
6586 jjump_pad_insn, jjump_pad_insn_size,
6587 adjusted_insn_addr, adjusted_insn_addr_end,
6588 err);
6589 }
6590
6591 emit_ops *
6592 linux_process_target::emit_ops ()
6593 {
6594 if (the_low_target.emit_ops != NULL)
6595 return (*the_low_target.emit_ops) ();
6596 else
6597 return NULL;
6598 }
6599
6600 int
6601 linux_process_target::get_min_fast_tracepoint_insn_len ()
6602 {
6603 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6604 }
6605
6606 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6607
6608 static int
6609 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6610 CORE_ADDR *phdr_memaddr, int *num_phdr)
6611 {
6612 char filename[PATH_MAX];
6613 int fd;
6614 const int auxv_size = is_elf64
6615 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6616 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6617
6618 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6619
6620 fd = open (filename, O_RDONLY);
6621 if (fd < 0)
6622 return 1;
6623
6624 *phdr_memaddr = 0;
6625 *num_phdr = 0;
6626 while (read (fd, buf, auxv_size) == auxv_size
6627 && (*phdr_memaddr == 0 || *num_phdr == 0))
6628 {
6629 if (is_elf64)
6630 {
6631 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6632
6633 switch (aux->a_type)
6634 {
6635 case AT_PHDR:
6636 *phdr_memaddr = aux->a_un.a_val;
6637 break;
6638 case AT_PHNUM:
6639 *num_phdr = aux->a_un.a_val;
6640 break;
6641 }
6642 }
6643 else
6644 {
6645 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6646
6647 switch (aux->a_type)
6648 {
6649 case AT_PHDR:
6650 *phdr_memaddr = aux->a_un.a_val;
6651 break;
6652 case AT_PHNUM:
6653 *num_phdr = aux->a_un.a_val;
6654 break;
6655 }
6656 }
6657 }
6658
6659 close (fd);
6660
6661 if (*phdr_memaddr == 0 || *num_phdr == 0)
6662 {
6663 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6664 "phdr_memaddr = %ld, phdr_num = %d",
6665 (long) *phdr_memaddr, *num_phdr);
6666 return 2;
6667 }
6668
6669 return 0;
6670 }
6671
6672 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6673
6674 static CORE_ADDR
6675 get_dynamic (const int pid, const int is_elf64)
6676 {
6677 CORE_ADDR phdr_memaddr, relocation;
6678 int num_phdr, i;
6679 unsigned char *phdr_buf;
6680 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6681
6682 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6683 return 0;
6684
6685 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6686 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6687
6688 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6689 return 0;
6690
6691 /* Compute relocation: it is expected to be 0 for "regular" executables,
6692 non-zero for PIE ones. */
6693 relocation = -1;
6694 for (i = 0; relocation == -1 && i < num_phdr; i++)
6695 if (is_elf64)
6696 {
6697 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6698
6699 if (p->p_type == PT_PHDR)
6700 relocation = phdr_memaddr - p->p_vaddr;
6701 }
6702 else
6703 {
6704 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6705
6706 if (p->p_type == PT_PHDR)
6707 relocation = phdr_memaddr - p->p_vaddr;
6708 }
6709
6710 if (relocation == -1)
6711 {
6712 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6713 all real-world executables, including PIE ones, always have PT_PHDR
6714 present. PT_PHDR is missing from some shared libraries and from fpc
6715 (Free Pascal 2.4) binaries, but neither of those needs or provides
6716 DT_DEBUG anyway (fpc binaries are statically linked).
6717
6718 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6719
6720 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6721
6722 return 0;
6723 }
6724
6725 for (i = 0; i < num_phdr; i++)
6726 {
6727 if (is_elf64)
6728 {
6729 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6730
6731 if (p->p_type == PT_DYNAMIC)
6732 return p->p_vaddr + relocation;
6733 }
6734 else
6735 {
6736 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6737
6738 if (p->p_type == PT_DYNAMIC)
6739 return p->p_vaddr + relocation;
6740 }
6741 }
6742
6743 return 0;
6744 }
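
/* An illustrative, standalone sketch (not part of gdbserver) of the
   relocation computed above, applied to the current process: PT_PHDR
   records the link-time address of the program headers, and AT_PHDR
   is where they actually ended up, so their difference is the PIE
   load bias.  Assumes glibc's getauxval and a 64-bit process; for
   exposition only.  */
#if 0
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int
main (void)
{
  const Elf64_Phdr *phdr = (const Elf64_Phdr *) getauxval (AT_PHDR);
  unsigned long phnum = getauxval (AT_PHNUM);

  for (unsigned long i = 0; i < phnum; i++)
    if (phdr[i].p_type == PT_PHDR)
      {
        printf ("load bias = 0x%lx\n",
                (unsigned long) ((unsigned long) phdr
                                 - phdr[i].p_vaddr));
        return 0;
      }
  return 1;
}
#endif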
6745
6746 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6747 can be 0 if the inferior does not yet have the library list initialized.
6748 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6749 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6750
6751 static CORE_ADDR
6752 get_r_debug (const int pid, const int is_elf64)
6753 {
6754 CORE_ADDR dynamic_memaddr;
6755 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6756 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6757 CORE_ADDR map = -1;
6758
6759 dynamic_memaddr = get_dynamic (pid, is_elf64);
6760 if (dynamic_memaddr == 0)
6761 return map;
6762
6763 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6764 {
6765 if (is_elf64)
6766 {
6767 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6768 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6769 union
6770 {
6771 Elf64_Xword map;
6772 unsigned char buf[sizeof (Elf64_Xword)];
6773 }
6774 rld_map;
6775 #endif
6776 #ifdef DT_MIPS_RLD_MAP
6777 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6778 {
6779 if (linux_read_memory (dyn->d_un.d_val,
6780 rld_map.buf, sizeof (rld_map.buf)) == 0)
6781 return rld_map.map;
6782 else
6783 break;
6784 }
6785 #endif /* DT_MIPS_RLD_MAP */
6786 #ifdef DT_MIPS_RLD_MAP_REL
6787 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6788 {
6789 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6790 rld_map.buf, sizeof (rld_map.buf)) == 0)
6791 return rld_map.map;
6792 else
6793 break;
6794 }
6795 #endif /* DT_MIPS_RLD_MAP_REL */
6796
6797 if (dyn->d_tag == DT_DEBUG && map == -1)
6798 map = dyn->d_un.d_val;
6799
6800 if (dyn->d_tag == DT_NULL)
6801 break;
6802 }
6803 else
6804 {
6805 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6806 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6807 union
6808 {
6809 Elf32_Word map;
6810 unsigned char buf[sizeof (Elf32_Word)];
6811 }
6812 rld_map;
6813 #endif
6814 #ifdef DT_MIPS_RLD_MAP
6815 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6816 {
6817 if (linux_read_memory (dyn->d_un.d_val,
6818 rld_map.buf, sizeof (rld_map.buf)) == 0)
6819 return rld_map.map;
6820 else
6821 break;
6822 }
6823 #endif /* DT_MIPS_RLD_MAP */
6824 #ifdef DT_MIPS_RLD_MAP_REL
6825 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6826 {
6827 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6828 rld_map.buf, sizeof (rld_map.buf)) == 0)
6829 return rld_map.map;
6830 else
6831 break;
6832 }
6833 #endif /* DT_MIPS_RLD_MAP_REL */
6834
6835 if (dyn->d_tag == DT_DEBUG && map == -1)
6836 map = dyn->d_un.d_val;
6837
6838 if (dyn->d_tag == DT_NULL)
6839 break;
6840 }
6841
6842 dynamic_memaddr += dyn_size;
6843 }
6844
6845 return map;
6846 }
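
/* An illustrative, standalone sketch (not part of gdbserver) of the
   DT_DEBUG scan above, run against the current process: the dynamic
   linker publishes the address of its r_debug structure through the
   _DYNAMIC entry tagged DT_DEBUG.  Assumes a dynamically linked
   64-bit glibc executable; for exposition only.  */
#if 0
#include <elf.h>
#include <link.h>
#include <stdio.h>

extern "C" Elf64_Dyn _DYNAMIC[];

int
main (void)
{
  for (const Elf64_Dyn *dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG && dyn->d_un.d_ptr != 0)
      {
        const struct r_debug *r
          = (const struct r_debug *) dyn->d_un.d_ptr;
        printf ("r_debug at %p, r_version %d\n", (void *) r,
                r->r_version);
        return 0;
      }
  return 1;
}
#endif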
6847
6848 /* Read one pointer from MEMADDR in the inferior. */
6849
6850 static int
6851 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6852 {
6853 int ret;
6854
6855 /* Go through a union so this works on either big or little endian
6856 hosts, when the inferior's pointer size is smaller than the size
6857 of CORE_ADDR. It is assumed the inferior's endianness is the
6858 same as the superior's. */
6859 union
6860 {
6861 CORE_ADDR core_addr;
6862 unsigned int ui;
6863 unsigned char uc;
6864 } addr;
6865
6866 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6867 if (ret == 0)
6868 {
6869 if (ptr_size == sizeof (CORE_ADDR))
6870 *ptr = addr.core_addr;
6871 else if (ptr_size == sizeof (unsigned int))
6872 *ptr = addr.ui;
6873 else
6874 gdb_assert_not_reached ("unhandled pointer size");
6875 }
6876 return ret;
6877 }
6878
6879 bool
6880 linux_process_target::supports_qxfer_libraries_svr4 ()
6881 {
6882 return true;
6883 }
6884
6885 struct link_map_offsets
6886 {
6887 /* Offset of r_debug.r_version. */
6888 int r_version_offset;
6889
6890 /* Offset of r_debug.r_map. */
6891 int r_map_offset;
6892
6893 /* Offset to l_addr field in struct link_map. */
6894 int l_addr_offset;
6895
6896 /* Offset to l_name field in struct link_map. */
6897 int l_name_offset;
6898
6899 /* Offset to l_ld field in struct link_map. */
6900 int l_ld_offset;
6901
6902 /* Offset to l_next field in struct link_map. */
6903 int l_next_offset;
6904
6905 /* Offset to l_prev field in struct link_map. */
6906 int l_prev_offset;
6907 };
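
/* An illustrative cross-check (not part of gdbserver) of the 64-bit
   offsets used by qxfer_libraries_svr4 below, against glibc's own
   declarations in <link.h> on a typical LP64 system.  Guarded out;
   for exposition only.  */
#if 0
#include <link.h>
#include <stddef.h>

static_assert (offsetof (struct r_debug, r_map) == 8, "r_map");
static_assert (offsetof (struct link_map, l_addr) == 0, "l_addr");
static_assert (offsetof (struct link_map, l_name) == 8, "l_name");
static_assert (offsetof (struct link_map, l_ld) == 16, "l_ld");
static_assert (offsetof (struct link_map, l_next) == 24, "l_next");
static_assert (offsetof (struct link_map, l_prev) == 32, "l_prev");
#endif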
6908
6909 /* Construct qXfer:libraries-svr4:read reply. */
6910
6911 int
6912 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6913 unsigned char *readbuf,
6914 unsigned const char *writebuf,
6915 CORE_ADDR offset, int len)
6916 {
6917 struct process_info_private *const priv = current_process ()->priv;
6918 char filename[PATH_MAX];
6919 int pid, is_elf64;
6920
6921 static const struct link_map_offsets lmo_32bit_offsets =
6922 {
6923 0, /* r_version offset. */
6924 4, /* r_debug.r_map offset. */
6925 0, /* l_addr offset in link_map. */
6926 4, /* l_name offset in link_map. */
6927 8, /* l_ld offset in link_map. */
6928 12, /* l_next offset in link_map. */
6929 16 /* l_prev offset in link_map. */
6930 };
6931
6932 static const struct link_map_offsets lmo_64bit_offsets =
6933 {
6934 0, /* r_version offset. */
6935 8, /* r_debug.r_map offset. */
6936 0, /* l_addr offset in link_map. */
6937 8, /* l_name offset in link_map. */
6938 16, /* l_ld offset in link_map. */
6939 24, /* l_next offset in link_map. */
6940 32 /* l_prev offset in link_map. */
6941 };
6942 const struct link_map_offsets *lmo;
6943 unsigned int machine;
6944 int ptr_size;
6945 CORE_ADDR lm_addr = 0, lm_prev = 0;
6946 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6947 int header_done = 0;
6948
6949 if (writebuf != NULL)
6950 return -2;
6951 if (readbuf == NULL)
6952 return -1;
6953
6954 pid = lwpid_of (current_thread);
6955 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6956 is_elf64 = elf_64_file_p (filename, &machine);
6957 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6958 ptr_size = is_elf64 ? 8 : 4;
6959
6960 while (annex[0] != '\0')
6961 {
6962 const char *sep;
6963 CORE_ADDR *addrp;
6964 int name_len;
6965
6966 sep = strchr (annex, '=');
6967 if (sep == NULL)
6968 break;
6969
6970 name_len = sep - annex;
6971 if (name_len == 5 && startswith (annex, "start"))
6972 addrp = &lm_addr;
6973 else if (name_len == 4 && startswith (annex, "prev"))
6974 addrp = &lm_prev;
6975 else
6976 {
6977 annex = strchr (sep, ';');
6978 if (annex == NULL)
6979 break;
6980 annex++;
6981 continue;
6982 }
6983
6984 annex = decode_address_to_semicolon (addrp, sep + 1);
6985 }
6986
6987 if (lm_addr == 0)
6988 {
6989 int r_version = 0;
6990
6991 if (priv->r_debug == 0)
6992 priv->r_debug = get_r_debug (pid, is_elf64);
6993
6994 /* We failed to find DT_DEBUG. This situation will not change
6995 for this inferior - do not retry it. Report it to GDB as
6996 E01; see GDB's solib-svr4.c for the reasons. */
6997 if (priv->r_debug == (CORE_ADDR) -1)
6998 return -1;
6999
7000 if (priv->r_debug != 0)
7001 {
7002 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7003 (unsigned char *) &r_version,
7004 sizeof (r_version)) != 0
7005 || r_version != 1)
7006 {
7007 warning ("unexpected r_debug version %d", r_version);
7008 }
7009 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7010 &lm_addr, ptr_size) != 0)
7011 {
7012 warning ("unable to read r_map from 0x%lx",
7013 (long) priv->r_debug + lmo->r_map_offset);
7014 }
7015 }
7016 }
7017
7018 std::string document = "<library-list-svr4 version=\"1.0\"";
7019
7020 while (lm_addr
7021 && read_one_ptr (lm_addr + lmo->l_name_offset,
7022 &l_name, ptr_size) == 0
7023 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7024 &l_addr, ptr_size) == 0
7025 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7026 &l_ld, ptr_size) == 0
7027 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7028 &l_prev, ptr_size) == 0
7029 && read_one_ptr (lm_addr + lmo->l_next_offset,
7030 &l_next, ptr_size) == 0)
7031 {
7032 unsigned char libname[PATH_MAX];
7033
7034 if (lm_prev != l_prev)
7035 {
7036 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7037 (long) lm_prev, (long) l_prev);
7038 break;
7039 }
7040
7041 /* Ignore the first entry even if it has a valid name, as the first
7042 entry corresponds to the main executable. The first entry should
7043 not be skipped if the dynamic loader was loaded late by a static
7044 executable (see the solib-svr4.c parameter ignore_first), but in
7045 that case the main executable has no PT_DYNAMIC and this function
7046 has already exited above because get_r_debug failed. */
7047 if (lm_prev == 0)
7048 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7049 else
7050 {
7051 /* Not checking for error because reading may stop before
7052 we've got PATH_MAX worth of characters. */
7053 libname[0] = '\0';
7054 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7055 libname[sizeof (libname) - 1] = '\0';
7056 if (libname[0] != '\0')
7057 {
7058 if (!header_done)
7059 {
7060 /* Terminate `<library-list-svr4'. */
7061 document += '>';
7062 header_done = 1;
7063 }
7064
7065 string_appendf (document, "<library name=\"");
7066 xml_escape_text_append (&document, (char *) libname);
7067 string_appendf (document, "\" lm=\"0x%lx\" "
7068 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7069 (unsigned long) lm_addr, (unsigned long) l_addr,
7070 (unsigned long) l_ld);
7071 }
7072 }
7073
7074 lm_prev = lm_addr;
7075 lm_addr = l_next;
7076 }
7077
7078 if (!header_done)
7079 {
7080 /* Empty list; terminate `<library-list-svr4'. */
7081 document += "/>";
7082 }
7083 else
7084 document += "</library-list-svr4>";
7085
7086 int document_len = document.length ();
7087 if (offset < document_len)
7088 document_len -= offset;
7089 else
7090 document_len = 0;
7091 if (len > document_len)
7092 len = document_len;
7093
7094 memcpy (readbuf, document.data () + offset, len);
7095
7096 return len;
7097 }
7098
7099 #ifdef HAVE_LINUX_BTRACE
7100
7101 btrace_target_info *
7102 linux_process_target::enable_btrace (ptid_t ptid,
7103 const btrace_config *conf)
7104 {
7105 return linux_enable_btrace (ptid, conf);
7106 }
7107
7108 /* See to_disable_btrace target method. */
7109
7110 int
7111 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7112 {
7113 enum btrace_error err;
7114
7115 err = linux_disable_btrace (tinfo);
7116 return (err == BTRACE_ERR_NONE ? 0 : -1);
7117 }
7118
7119 /* Encode an Intel Processor Trace configuration. */
7120
7121 static void
7122 linux_low_encode_pt_config (struct buffer *buffer,
7123 const struct btrace_data_pt_config *config)
7124 {
7125 buffer_grow_str (buffer, "<pt-config>\n");
7126
7127 switch (config->cpu.vendor)
7128 {
7129 case CV_INTEL:
7130 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7131 "model=\"%u\" stepping=\"%u\"/>\n",
7132 config->cpu.family, config->cpu.model,
7133 config->cpu.stepping);
7134 break;
7135
7136 default:
7137 break;
7138 }
7139
7140 buffer_grow_str (buffer, "</pt-config>\n");
7141 }
7142
7143 /* Encode a raw buffer. */
7144
7145 static void
7146 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7147 unsigned int size)
7148 {
7149 if (size == 0)
7150 return;
7151
7152 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7153 buffer_grow_str (buffer, "<raw>\n");
7154
7155 while (size-- > 0)
7156 {
7157 char elem[2];
7158
7159 elem[0] = tohex ((*data >> 4) & 0xf);
7160 elem[1] = tohex (*data++ & 0xf);
7161
7162 buffer_grow (buffer, elem, 2);
7163 }
7164
7165 buffer_grow_str (buffer, "</raw>\n");
7166 }
7167
7168 /* See to_read_btrace target method. */
7169
7170 int
7171 linux_process_target::read_btrace (btrace_target_info *tinfo,
7172 buffer *buffer,
7173 enum btrace_read_type type)
7174 {
7175 struct btrace_data btrace;
7176 enum btrace_error err;
7177
7178 err = linux_read_btrace (&btrace, tinfo, type);
7179 if (err != BTRACE_ERR_NONE)
7180 {
7181 if (err == BTRACE_ERR_OVERFLOW)
7182 buffer_grow_str0 (buffer, "E.Overflow.");
7183 else
7184 buffer_grow_str0 (buffer, "E.Generic Error.");
7185
7186 return -1;
7187 }
7188
7189 switch (btrace.format)
7190 {
7191 case BTRACE_FORMAT_NONE:
7192 buffer_grow_str0 (buffer, "E.No Trace.");
7193 return -1;
7194
7195 case BTRACE_FORMAT_BTS:
7196 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7197 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7198
7199 for (const btrace_block &block : *btrace.variant.bts.blocks)
7200 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7201 paddress (block.begin), paddress (block.end));
7202
7203 buffer_grow_str0 (buffer, "</btrace>\n");
7204 break;
7205
7206 case BTRACE_FORMAT_PT:
7207 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7208 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7209 buffer_grow_str (buffer, "<pt>\n");
7210
7211 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7212
7213 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7214 btrace.variant.pt.size);
7215
7216 buffer_grow_str (buffer, "</pt>\n");
7217 buffer_grow_str0 (buffer, "</btrace>\n");
7218 break;
7219
7220 default:
7221 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7222 return -1;
7223 }
7224
7225 return 0;
7226 }
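
/* For illustration, a BTS reply built by the code above would look
   like this (addresses invented):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400500" end="0x400520"/>
     <block begin="0x400800" end="0x400830"/>
     </btrace>
*/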
7227
7228 /* See to_btrace_conf target method. */
7229
7230 int
7231 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7232 buffer *buffer)
7233 {
7234 const struct btrace_config *conf;
7235
7236 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7237 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7238
7239 conf = linux_btrace_conf (tinfo);
7240 if (conf != NULL)
7241 {
7242 switch (conf->format)
7243 {
7244 case BTRACE_FORMAT_NONE:
7245 break;
7246
7247 case BTRACE_FORMAT_BTS:
7248 buffer_xml_printf (buffer, "<bts");
7249 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7250 buffer_xml_printf (buffer, " />\n");
7251 break;
7252
7253 case BTRACE_FORMAT_PT:
7254 buffer_xml_printf (buffer, "<pt");
7255 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7256 buffer_xml_printf (buffer, "/>\n");
7257 break;
7258 }
7259 }
7260
7261 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7262 return 0;
7263 }
7264 #endif /* HAVE_LINUX_BTRACE */
7265
7266 /* See nat/linux-nat.h. */
7267
7268 ptid_t
7269 current_lwp_ptid (void)
7270 {
7271 return ptid_of (current_thread);
7272 }
7273
7274 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7275
7276 int
7277 linux_process_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7278 {
7279 if (the_low_target.breakpoint_kind_from_pc != NULL)
7280 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7281 else
7282 return process_stratum_target::breakpoint_kind_from_pc (pcptr);
7283 }
7284
7285 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7286
7287 const gdb_byte *
7288 linux_process_target::sw_breakpoint_from_kind (int kind, int *size)
7289 {
7290 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7291
7292 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7293 }
7294
7295 /* Implementation of the target_ops method
7296 "breakpoint_kind_from_current_state". */
7297
7298 int
7299 linux_process_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7300 {
7301 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7302 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7303 else
7304 return breakpoint_kind_from_pc (pcptr);
7305 }
7306
7307 const char *
7308 linux_process_target::thread_name (ptid_t thread)
7309 {
7310 return linux_proc_tid_get_name (thread);
7311 }
7312
7313 #if USE_THREAD_DB
7314 bool
7315 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7316 int *handle_len)
7317 {
7318 return thread_db_thread_handle (ptid, handle, handle_len);
7319 }
7320 #endif
7321
7322 /* Default implementation of linux_target_ops method "set_pc" for
7323 32-bit pc register which is literally named "pc". */
7324
7325 void
7326 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7327 {
7328 uint32_t newpc = pc;
7329
7330 supply_register_by_name (regcache, "pc", &newpc);
7331 }
7332
7333 /* Default implementation of linux_target_ops method "get_pc" for
7334 32-bit pc register which is literally named "pc". */
7335
7336 CORE_ADDR
7337 linux_get_pc_32bit (struct regcache *regcache)
7338 {
7339 uint32_t pc;
7340
7341 collect_register_by_name (regcache, "pc", &pc);
7342 if (debug_threads)
7343 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7344 return pc;
7345 }
7346
7347 /* Default implementation of linux_target_ops method "set_pc" for
7348 64-bit pc register which is literally named "pc". */
7349
7350 void
7351 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7352 {
7353 uint64_t newpc = pc;
7354
7355 supply_register_by_name (regcache, "pc", &newpc);
7356 }
7357
7358 /* Default implementation of linux_target_ops method "get_pc" for
7359 64-bit pc register which is literally named "pc". */
7360
7361 CORE_ADDR
7362 linux_get_pc_64bit (struct regcache *regcache)
7363 {
7364 uint64_t pc;
7365
7366 collect_register_by_name (regcache, "pc", &pc);
7367 if (debug_threads)
7368 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7369 return pc;
7370 }
7371
7372 /* See linux-low.h. */
7373
7374 int
7375 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7376 {
7377 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7378 int offset = 0;
7379
7380 gdb_assert (wordsize == 4 || wordsize == 8);
7381
7382 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7383 {
7384 if (wordsize == 4)
7385 {
7386 uint32_t *data_p = (uint32_t *) data;
7387 if (data_p[0] == match)
7388 {
7389 *valp = data_p[1];
7390 return 1;
7391 }
7392 }
7393 else
7394 {
7395 uint64_t *data_p = (uint64_t *) data;
7396 if (data_p[0] == match)
7397 {
7398 *valp = data_p[1];
7399 return 1;
7400 }
7401 }
7402
7403 offset += 2 * wordsize;
7404 }
7405
7406 return 0;
7407 }
7408
7409 /* See linux-low.h. */
7410
7411 CORE_ADDR
7412 linux_get_hwcap (int wordsize)
7413 {
7414 CORE_ADDR hwcap = 0;
7415 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7416 return hwcap;
7417 }
7418
7419 /* See linux-low.h. */
7420
7421 CORE_ADDR
7422 linux_get_hwcap2 (int wordsize)
7423 {
7424 CORE_ADDR hwcap2 = 0;
7425 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7426 return hwcap2;
7427 }
7428
7429 #ifdef HAVE_LINUX_REGSETS
7430 void
7431 initialize_regsets_info (struct regsets_info *info)
7432 {
7433 for (info->num_regsets = 0;
7434 info->regsets[info->num_regsets].size >= 0;
7435 info->num_regsets++)
7436 ;
7437 }
7438 #endif
7439
7440 void
7441 initialize_low (void)
7442 {
7443 struct sigaction sigchld_action;
7444
7445 memset (&sigchld_action, 0, sizeof (sigchld_action));
7446 set_target_ops (the_linux_target);
7447
7448 linux_ptrace_init_warnings ();
7449 linux_proc_init_warnings ();
7450
7451 sigchld_action.sa_handler = sigchld_handler;
7452 sigemptyset (&sigchld_action.sa_mask);
7453 sigchld_action.sa_flags = SA_RESTART;
7454 sigaction (SIGCHLD, &sigchld_action, NULL);
7455
7456 initialize_low_arch ();
7457
7458 linux_check_ptrace_features ();
7459 }