/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;  /* Entry type */
  union
    {
      uint32_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;  /* Entry type */
  union
    {
      uint64_t a_val;  /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
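
/* Illustrative sketch (not compiled): walking a 32-bit auxv read from
   /proc/<pid>/auxv using the Elf32_auxv_t layout above.  The file is a
   flat array of (a_type, a_val) pairs terminated by an AT_NULL entry.
   Error handling is simplified, and "read_hwcap_32" is a hypothetical
   name, not a gdbserver function.  */
#if 0
static uint32_t
read_hwcap_32 (int pid)
{
  char path[64];
  Elf32_auxv_t entry;
  uint32_t hwcap = 0;
  int fd;

  snprintf (path, sizeof (path), "/proc/%d/auxv", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return 0;

  /* Read one fixed-size entry at a time until the AT_NULL terminator.  */
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry)
         && entry.a_type != AT_NULL)
    if (entry.a_type == AT_HWCAP)
      hwcap = entry.a_un.a_val;

  close (fd);
  return hwcap;
}
#endif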

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
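
/* Illustrative usage (not compiled) of the two helpers above: the wait
   code stashes a stop it cannot yet attribute to any known LWP, and
   the fork/clone event handler later claims it by pid.
   "early_stop_pid" and "early_stop_status" are hypothetical locals.  */
#if 0
  /* Producer: an unknown pid reported a stop.  */
  add_to_pid_list (&stopped_pids, early_stop_pid, early_stop_status);

  /* Consumer: a fork/clone event named that pid as a new child.  */
  int status;
  if (pull_pid_from_list (&stopped_pids, early_stop_pid, &status))
    {
      /* STATUS holds the child's initial (usually SIGSTOP) stop.  */
    }
#endif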

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void unsuspend_all_lwps (struct lwp_info *except);
static struct lwp_info *add_lwp (ptid_t ptid);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
                                    siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  On success, store
   the file's e_machine value in *MACHINE; otherwise set it to
   EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
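
/* Illustrative usage (not compiled): picking 32-bit vs 64-bit register
   layouts for an inferior by inspecting its /proc/<pid>/exe.  "pid" is
   a hypothetical variable.  */
#if 0
  unsigned int machine;
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (is_elf64 < 0)
    warning ("could not inspect /proc/%d/exe", pid);
  else if (is_elf64)
    {
      /* Use 64-bit layouts; MACHINE holds e_machine in both cases.  */
    }
  else
    {
      /* Use 32-bit layouts.  */
    }
#endif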

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
                                            int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_t (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_of (event_thr).lwp (),
                            ptid.pid ());
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, the child will
                 hit them, so uninsert single-step breakpoints from the
                 parent (and child).  Once the vfork child is done, reinsert
                 them back in the parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once the vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
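
/* Illustrative sketch (not compiled): the bare ptrace pattern behind
   the fork/clone handling above.  A tracer that enabled
   PTRACE_O_TRACEFORK (and friends) sees the parent stop with the
   extended event encoded in the wait status, and fetches the new
   lwp's id with PTRACE_GETEVENTMSG.  Error handling is omitted.  */
#if 0
  int wstat;
  pid_t parent = waitpid (-1, &wstat, __WALL);

  if (WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_FORK)
    {
      unsigned long child;

      ptrace (PTRACE_GETEVENTMSG, parent, (PTRACE_TYPE_ARG3) 0, &child);
      /* CHILD stops (or has stopped) with SIGSTOP; collect that stop
         with waitpid before issuing further ptrace requests on it.  */
    }
#endif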

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}
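
/* Illustrative sketch (not compiled): the classic shape around a
   PTRACE_TRACEME callback like the one above.  fork_inferior does this
   (plus argument, environment and terminal setup) for gdbserver.
   "program" and "argv" are hypothetical variables.  */
#if 0
  pid_t child = fork ();

  if (child == 0)
    {
      linux_ptrace_fun ();   /* PTRACE_TRACEME, setpgid, stdio setup.  */
      execv (program, argv); /* The child stops with SIGTRAP at exec.  */
      _exit (127);
    }
  /* Parent: the first waitpid on CHILD collects the exec stop.  */
#endif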

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
                                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
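
/* Illustrative sketch (not compiled): the minimal attach sequence the
   comments above are guarding.  PTRACE_ATTACH queues a SIGSTOP; the
   tracer must collect that stop before making further ptrace requests.
   "lwpid" is a hypothetical variable.  */
#if 0
  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  int wstat;
  if (waitpid (lwpid, &wstat, __WALL) == lwpid
      && WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP)
    {
      /* Now safe to issue further requests (PTRACE_CONT,
         PTRACE_GETREGS, PTRACE_SETOPTIONS, ...).  */
    }
#endif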

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, safe_strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 safe_strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
1851
1852 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1853 their exits until all other threads in the group have exited. */
1854
1855 static void
1856 check_zombie_leaders (void)
1857 {
1858 for_each_process ([] (process_info *proc) {
1859 pid_t leader_pid = pid_of (proc);
1860 struct lwp_info *leader_lp;
1861
1862 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1863
1864 if (debug_threads)
1865 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1866 "num_lwps=%d, zombie=%d\n",
1867 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1868 linux_proc_pid_is_zombie (leader_pid));
1869
1870 if (leader_lp != NULL && !leader_lp->stopped
1871 /* Check if there are other threads in the group, as we may
1872 have raced with the inferior simply exiting. */
1873 && !last_thread_of_process_p (leader_pid)
1874 && linux_proc_pid_is_zombie (leader_pid))
1875 {
1876 /* A leader zombie can mean one of two things:
1877
1878 - It exited, and there's an exit status pending
1879 available, or only the leader exited (not the whole
1880 program). In the latter case, we can't waitpid the
1881 leader's exit status until all other threads are gone.
1882
1883 - There are 3 or more threads in the group, and a thread
1884 other than the leader exec'd. On an exec, the Linux
1885 kernel destroys all other threads (except the execing
1886 one) in the thread group, and resets the execing thread's
1887 tid to the tgid. No exit notification is sent for the
1888 execing thread -- from the ptracer's perspective, it
1889 appears as though the execing thread just vanishes.
1890 Until we reap all other threads except the leader and the
1891 execing thread, the leader will be zombie, and the
1892 execing thread will be in `D (disc sleep)'. As soon as
1893 all other threads are reaped, the execing thread changes
1894 its tid to the tgid, and the previous (zombie) leader
1895 vanishes, giving place to the "new" leader. We could try
1896 distinguishing the exit and exec cases, by waiting once
1897 more, and seeing if something comes out, but it doesn't
1898 sound useful. The previous leader _does_ go away, and
1899 we'll re-add the new one once we see the exec event
1900 (which is just the same as what would happen if the
1901 previous leader did exit voluntarily before some other
1902 thread execs). */
1903
1904 if (debug_threads)
1905 debug_printf ("CZL: Thread group leader %d zombie "
1906 "(it exited, or another thread execd).\n",
1907 leader_pid);
1908
1909 delete_lwp (leader_lp);
1910 }
1911 });
1912 }
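
/* Illustrative sketch of the zombie test itself (the real check is
   linux_proc_pid_is_zombie, from nat/linux-procfs.h): a zombie task
   shows a "State: Z (zombie)" line in /proc/PID/status.

     #include <stdio.h>
     #include <string.h>

     static int
     pid_is_zombie_sketch (pid_t pid)
     {
       char path[64], line[128];
       int zombie = 0;
       FILE *f;

       snprintf (path, sizeof path, "/proc/%d/status", (int) pid);
       f = fopen (path, "r");
       if (f == NULL)
         return 0;
       while (fgets (line, sizeof line, f) != NULL)
         if (strncmp (line, "State:", 6) == 0)
           {
             zombie = strchr (line + 6, 'Z') != NULL;
             break;
           }
       fclose (f);
       return zombie;
     }
*/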
1913
1914 /* Callback for `find_thread'. Returns the first LWP that is not
1915 stopped. */
1916
1917 static bool
1918 not_stopped_callback (thread_info *thread, ptid_t filter)
1919 {
1920 if (!thread->id.matches (filter))
1921 return false;
1922
1923 lwp_info *lwp = get_thread_lwp (thread);
1924
1925 return !lwp->stopped;
1926 }
1927
1928 /* Increment LWP's suspend count. */
1929
1930 static void
1931 lwp_suspended_inc (struct lwp_info *lwp)
1932 {
1933 lwp->suspended++;
1934
1935 if (debug_threads && lwp->suspended > 4)
1936 {
1937 struct thread_info *thread = get_lwp_thread (lwp);
1938
1939 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1940 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1941 }
1942 }
1943
1944 /* Decrement LWP's suspend count. */
1945
1946 static void
1947 lwp_suspended_decr (struct lwp_info *lwp)
1948 {
1949 lwp->suspended--;
1950
1951 if (lwp->suspended < 0)
1952 {
1953 struct thread_info *thread = get_lwp_thread (lwp);
1954
1955 internal_error (__FILE__, __LINE__,
1956 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1957 lwp->suspended);
1958 }
1959 }
1960
1961 /* This function should only be called if the LWP got a SIGTRAP.
1962
1963 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1964 event was handled, 0 otherwise. */
1965
1966 static int
1967 handle_tracepoints (struct lwp_info *lwp)
1968 {
1969 struct thread_info *tinfo = get_lwp_thread (lwp);
1970 int tpoint_related_event = 0;
1971
1972 gdb_assert (lwp->suspended == 0);
1973
1974 /* If this tracepoint hit causes a tracing stop, we'll immediately
1975 uninsert tracepoints. To do this, we temporarily pause all
1976 threads, unpatch away, and then unpause threads. We need to make
1977 sure the unpausing doesn't resume LWP too. */
1978 lwp_suspended_inc (lwp);
1979
1980 /* And we need to be sure that any all-threads-stopping doesn't try
1981 to move threads out of the jump pads, as it could deadlock the
1982 inferior (LWP could be in the jump pad, maybe even holding the
1983 lock.) */
1984
1985 /* Do any necessary step collect actions. */
1986 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1987
1988 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1989
1990 /* See if we just hit a tracepoint and do its main collect
1991 actions. */
1992 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1993
1994 lwp_suspended_decr (lwp);
1995
1996 gdb_assert (lwp->suspended == 0);
1997 gdb_assert (!stabilizing_threads
1998 || (lwp->collecting_fast_tracepoint
1999 != fast_tpoint_collect_result::not_collecting));
2000
2001 if (tpoint_related_event)
2002 {
2003 if (debug_threads)
2004 debug_printf ("got a tracepoint event\n");
2005 return 1;
2006 }
2007
2008 return 0;
2009 }
2010
2011 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2012 collection status. */
2013
2014 static fast_tpoint_collect_result
2015 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2016 struct fast_tpoint_collect_status *status)
2017 {
2018 CORE_ADDR thread_area;
2019 struct thread_info *thread = get_lwp_thread (lwp);
2020
2021 if (the_low_target.get_thread_area == NULL)
2022 return fast_tpoint_collect_result::not_collecting;
2023
2024 /* Get the thread area address. This is used to recognize which
2025 thread is which when tracing with the in-process agent library.
2026 We don't read anything from the address, and treat it as opaque;
2027 it's the address itself that we assume is unique per-thread. */
2028 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2029 return fast_tpoint_collect_result::not_collecting;
2030
2031 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2032 }
2033
2034 bool
2035 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2036 {
2037 struct thread_info *saved_thread;
2038
2039 saved_thread = current_thread;
2040 current_thread = get_lwp_thread (lwp);
2041
2042 if ((wstat == NULL
2043 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2044 && supports_fast_tracepoints ()
2045 && agent_loaded_p ())
2046 {
2047 struct fast_tpoint_collect_status status;
2048
2049 if (debug_threads)
2050 debug_printf ("Checking whether LWP %ld needs to move out of the "
2051 "jump pad.\n",
2052 lwpid_of (current_thread));
2053
2054 fast_tpoint_collect_result r
2055 = linux_fast_tracepoint_collecting (lwp, &status);
2056
2057 if (wstat == NULL
2058 || (WSTOPSIG (*wstat) != SIGILL
2059 && WSTOPSIG (*wstat) != SIGFPE
2060 && WSTOPSIG (*wstat) != SIGSEGV
2061 && WSTOPSIG (*wstat) != SIGBUS))
2062 {
2063 lwp->collecting_fast_tracepoint = r;
2064
2065 if (r != fast_tpoint_collect_result::not_collecting)
2066 {
2067 if (r == fast_tpoint_collect_result::before_insn
2068 && lwp->exit_jump_pad_bkpt == NULL)
2069 {
2070 /* Haven't executed the original instruction yet.
2071 Set breakpoint there, and wait till it's hit,
2072 then single-step until exiting the jump pad. */
2073 lwp->exit_jump_pad_bkpt
2074 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2075 }
2076
2077 if (debug_threads)
2078 debug_printf ("Checking whether LWP %ld needs to move out of "
2079 "the jump pad...it does\n",
2080 lwpid_of (current_thread));
2081 current_thread = saved_thread;
2082
2083 return true;
2084 }
2085 }
2086 else
2087 {
2088 /* If we get a synchronous signal while collecting, *and*
2089 while executing the (relocated) original instruction,
2090 reset the PC to point at the tpoint address, before
2091 reporting to GDB. Otherwise, it's an IPA lib bug: just
2092 report the signal to GDB, and pray for the best. */
2093
2094 lwp->collecting_fast_tracepoint
2095 = fast_tpoint_collect_result::not_collecting;
2096
2097 if (r != fast_tpoint_collect_result::not_collecting
2098 && (status.adjusted_insn_addr <= lwp->stop_pc
2099 && lwp->stop_pc < status.adjusted_insn_addr_end))
2100 {
2101 siginfo_t info;
2102 struct regcache *regcache;
2103
2104 /* The si_addr on a few signals references the address
2105 of the faulting instruction. Adjust that as
2106 well. */
2107 if ((WSTOPSIG (*wstat) == SIGILL
2108 || WSTOPSIG (*wstat) == SIGFPE
2109 || WSTOPSIG (*wstat) == SIGBUS
2110 || WSTOPSIG (*wstat) == SIGSEGV)
2111 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2112 (PTRACE_TYPE_ARG3) 0, &info) == 0
2113 /* Final check just to make sure we don't clobber
2114 the siginfo of non-kernel-sent signals. */
2115 && (uintptr_t) info.si_addr == lwp->stop_pc)
2116 {
2117 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2118 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2119 (PTRACE_TYPE_ARG3) 0, &info);
2120 }
2121
2122 regcache = get_thread_regcache (current_thread, 1);
2123 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2124 lwp->stop_pc = status.tpoint_addr;
2125
2126 /* Cancel any fast tracepoint lock this thread was
2127 holding. */
2128 force_unlock_trace_buffer ();
2129 }
2130
2131 if (lwp->exit_jump_pad_bkpt != NULL)
2132 {
2133 if (debug_threads)
2134 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2135 "stopping all threads momentarily.\n");
2136
2137 stop_all_lwps (1, lwp);
2138
2139 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2140 lwp->exit_jump_pad_bkpt = NULL;
2141
2142 unstop_all_lwps (1, lwp);
2143
2144 gdb_assert (lwp->suspended >= 0);
2145 }
2146 }
2147 }
2148
2149 if (debug_threads)
2150 debug_printf ("Checking whether LWP %ld needs to move out of the "
2151 "jump pad...no\n",
2152 lwpid_of (current_thread));
2153
2154 current_thread = saved_thread;
2155 return false;
2156 }
2157
2158 /* Enqueue one signal in the "signals to report later when out of the
2159 jump pad" list. */
2160
2161 static void
2162 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2163 {
2164 struct pending_signals *p_sig;
2165 struct thread_info *thread = get_lwp_thread (lwp);
2166
2167 if (debug_threads)
2168 debug_printf ("Deferring signal %d for LWP %ld.\n",
2169 WSTOPSIG (*wstat), lwpid_of (thread));
2170
2171 if (debug_threads)
2172 {
2173 struct pending_signals *sig;
2174
2175 for (sig = lwp->pending_signals_to_report;
2176 sig != NULL;
2177 sig = sig->prev)
2178 debug_printf (" Already queued %d\n",
2179 sig->signal);
2180
2181 debug_printf (" (no more currently queued signals)\n");
2182 }
2183
2184 /* Don't enqueue non-RT signals if they are already in the deferred
2185 queue. (SIGSTOP being the easiest signal to see ending up here
2186 twice.) */
2187 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2188 {
2189 struct pending_signals *sig;
2190
2191 for (sig = lwp->pending_signals_to_report;
2192 sig != NULL;
2193 sig = sig->prev)
2194 {
2195 if (sig->signal == WSTOPSIG (*wstat))
2196 {
2197 if (debug_threads)
2198 debug_printf ("Not requeuing already queued non-RT signal %d"
2199 " for LWP %ld\n",
2200 sig->signal,
2201 lwpid_of (thread));
2202 return;
2203 }
2204 }
2205 }
2206
2207 p_sig = XCNEW (struct pending_signals);
2208 p_sig->prev = lwp->pending_signals_to_report;
2209 p_sig->signal = WSTOPSIG (*wstat);
2210
2211 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2212 &p_sig->info);
2213
2214 lwp->pending_signals_to_report = p_sig;
2215 }
2216
2217 /* Dequeue one signal from the "signals to report later when out of
2218 the jump pad" list. */
2219
2220 static int
2221 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2222 {
2223 struct thread_info *thread = get_lwp_thread (lwp);
2224
2225 if (lwp->pending_signals_to_report != NULL)
2226 {
2227 struct pending_signals **p_sig;
2228
2229 p_sig = &lwp->pending_signals_to_report;
2230 while ((*p_sig)->prev != NULL)
2231 p_sig = &(*p_sig)->prev;
2232
2233 *wstat = W_STOPCODE ((*p_sig)->signal);
2234 if ((*p_sig)->info.si_signo != 0)
2235 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2236 &(*p_sig)->info);
2237 free (*p_sig);
2238 *p_sig = NULL;
2239
2240 if (debug_threads)
2241 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2242 WSTOPSIG (*wstat), lwpid_of (thread));
2243
2244 if (debug_threads)
2245 {
2246 struct pending_signals *sig;
2247
2248 for (sig = lwp->pending_signals_to_report;
2249 sig != NULL;
2250 sig = sig->prev)
2251 debug_printf (" Still queued %d\n",
2252 sig->signal);
2253
2254 debug_printf (" (no more queued signals)\n");
2255 }
2256
2257 return 1;
2258 }
2259
2260 return 0;
2261 }
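
/* Note on ordering (illustrative): enqueue_one_deferred_signal pushes
   new entries at the head of the `prev'-linked list, while the dequeue
   above walks to the tail, so deferred signals come back out in FIFO
   order.  A minimal model of the same discipline:

     struct node { struct node *prev; int val; };

     static void
     push_newest (struct node **head, struct node *n)
     {
       n->prev = *head;   // the newest entry becomes the head
       *head = n;
     }

     static struct node *
     pop_oldest (struct node **head)
     {
       struct node **p = head;

       if (*p == NULL)
         return NULL;
       while ((*p)->prev != NULL)
         p = &(*p)->prev;   // walk to the tail, i.e. the oldest entry
       struct node *oldest = *p;
       *p = NULL;           // unlink the tail
       return oldest;
     }
*/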
2262
2263 /* Fetch the possibly triggered data watchpoint info and store it in
2264 CHILD.
2265
2266 On some archs, like x86, that use debug registers to set
2267 watchpoints, it's possible that the way to know which watched
2268 address trapped is to check the register that is used to select
2269 which address to watch. Problem is, between setting the watchpoint
2270 and reading back which data address trapped, the user may change
2271 the set of watchpoints, and, as a consequence, GDB changes the
2272 debug registers in the inferior. To avoid reading back a stale
2273 stopped-data-address when that happens, we cache in LP the fact
2274 that a watchpoint trapped, and the corresponding data address, as
2275 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2276 registers meanwhile, we have the cached data we can rely on. */
2277
2278 static int
2279 check_stopped_by_watchpoint (struct lwp_info *child)
2280 {
2281 if (the_low_target.stopped_by_watchpoint != NULL)
2282 {
2283 struct thread_info *saved_thread;
2284
2285 saved_thread = current_thread;
2286 current_thread = get_lwp_thread (child);
2287
2288 if (the_low_target.stopped_by_watchpoint ())
2289 {
2290 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2291
2292 if (the_low_target.stopped_data_address != NULL)
2293 child->stopped_data_address
2294 = the_low_target.stopped_data_address ();
2295 else
2296 child->stopped_data_address = 0;
2297 }
2298
2299 current_thread = saved_thread;
2300 }
2301
2302 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2303 }
2304
2305 /* Return the ptrace options that we want to try to enable. */
2306
2307 static int
2308 linux_low_ptrace_options (int attached)
2309 {
2310 client_state &cs = get_client_state ();
2311 int options = 0;
2312
2313 if (!attached)
2314 options |= PTRACE_O_EXITKILL;
2315
2316 if (cs.report_fork_events)
2317 options |= PTRACE_O_TRACEFORK;
2318
2319 if (cs.report_vfork_events)
2320 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2321
2322 if (cs.report_exec_events)
2323 options |= PTRACE_O_TRACEEXEC;
2324
2325 options |= PTRACE_O_TRACESYSGOOD;
2326
2327 return options;
2328 }
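
/* Illustrative sketch of how such an option mask is installed on a
   stopped tracee (the real call path is linux_enable_event_reporting,
   from nat/linux-ptrace.h):

     #include <errno.h>
     #include <stdint.h>
     #include <sys/ptrace.h>

     static int
     set_ptrace_options_sketch (pid_t pid, int options)
     {
       // The tracee must be in a ptrace-stop for this to succeed.
       errno = 0;
       ptrace (PTRACE_SETOPTIONS, pid, (void *) 0,
               (void *) (uintptr_t) options);
       return errno != 0 ? -1 : 0;
     }
*/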
2329
2330 lwp_info *
2331 linux_process_target::filter_event (int lwpid, int wstat)
2332 {
2333 client_state &cs = get_client_state ();
2334 struct lwp_info *child;
2335 struct thread_info *thread;
2336 int have_stop_pc = 0;
2337
2338 child = find_lwp_pid (ptid_t (lwpid));
2339
2340 /* Check for stop events reported by a process we didn't already
2341 know about - anything not already in our LWP list.
2342
2343 If we're expecting to receive stopped processes after
2344 fork, vfork, and clone events, then we'll just add the
2345 new one to our list and go back to waiting for the event
2346 to be reported - the stopped process might be returned
2347 from waitpid before or after the event is.
2348
2349 But note the case of a non-leader thread exec'ing after the
2350 leader having exited, and gone from our lists (because
2351 check_zombie_leaders deleted it). The non-leader thread
2352 changes its tid to the tgid. */
2353
2354 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2355 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2356 {
2357 ptid_t child_ptid;
2358
2359 /* A multi-thread exec after we had seen the leader exiting. */
2360 if (debug_threads)
2361 {
2362 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2363 "after exec.\n", lwpid);
2364 }
2365
2366 child_ptid = ptid_t (lwpid, lwpid, 0);
2367 child = add_lwp (child_ptid);
2368 child->stopped = 1;
2369 current_thread = child->thread;
2370 }
2371
2372 /* If we didn't find a process, one of two things presumably happened:
2373 - A process we started and then detached from has exited. Ignore it.
2374 - A process we are controlling has forked and the new child's stop
2375 was reported to us by the kernel. Save its PID. */
2376 if (child == NULL && WIFSTOPPED (wstat))
2377 {
2378 add_to_pid_list (&stopped_pids, lwpid, wstat);
2379 return NULL;
2380 }
2381 else if (child == NULL)
2382 return NULL;
2383
2384 thread = get_lwp_thread (child);
2385
2386 child->stopped = 1;
2387
2388 child->last_status = wstat;
2389
2390 /* Check if the thread has exited. */
2391 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2392 {
2393 if (debug_threads)
2394 debug_printf ("LLFE: %d exited.\n", lwpid);
2395
2396 if (finish_step_over (child))
2397 {
2398 /* Unsuspend all other LWPs, and set them back running again. */
2399 unsuspend_all_lwps (child);
2400 }
2401
2402 /* If there is at least one more LWP, then the exit signal was
2403 not the end of the debugged application and should be
2404 ignored, unless GDB wants to hear about thread exits. */
2405 if (cs.report_thread_events
2406 || last_thread_of_process_p (pid_of (thread)))
2407 {
2408 /* Since events are serialized to GDB core, we can't report
2409 this one right now. Leave the status pending for
2410 the next time we're able to report it. */
2411 mark_lwp_dead (child, wstat);
2412 return child;
2413 }
2414 else
2415 {
2416 delete_lwp (child);
2417 return NULL;
2418 }
2419 }
2420
2421 gdb_assert (WIFSTOPPED (wstat));
2422
2423 if (WIFSTOPPED (wstat))
2424 {
2425 struct process_info *proc;
2426
2427 /* Architecture-specific setup after inferior is running. */
2428 proc = find_process_pid (pid_of (thread));
2429 if (proc->tdesc == NULL)
2430 {
2431 if (proc->attached)
2432 {
2433 /* This needs to happen after we have attached to the
2434 inferior and it is stopped for the first time, but
2435 before we access any inferior registers. */
2436 linux_arch_setup_thread (thread);
2437 }
2438 else
2439 {
2440 /* The process is started, but GDBserver will do
2441 architecture-specific setup after the program stops at
2442 the first instruction. */
2443 child->status_pending_p = 1;
2444 child->status_pending = wstat;
2445 return child;
2446 }
2447 }
2448 }
2449
2450 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2451 {
2452 struct process_info *proc = find_process_pid (pid_of (thread));
2453 int options = linux_low_ptrace_options (proc->attached);
2454
2455 linux_enable_event_reporting (lwpid, options);
2456 child->must_set_ptrace_flags = 0;
2457 }
2458
2459 /* Always update syscall_state, even if it will be filtered later. */
2460 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2461 {
2462 child->syscall_state
2463 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2464 ? TARGET_WAITKIND_SYSCALL_RETURN
2465 : TARGET_WAITKIND_SYSCALL_ENTRY);
2466 }
2467 else
2468 {
2469 /* Almost all other ptrace-stops are known to be outside of system
2470 calls, with further exceptions in handle_extended_wait. */
2471 child->syscall_state = TARGET_WAITKIND_IGNORE;
2472 }
2473
2474 /* Be careful to not overwrite stop_pc until save_stop_reason is
2475 called. */
2476 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2477 && linux_is_extended_waitstatus (wstat))
2478 {
2479 child->stop_pc = get_pc (child);
2480 if (handle_extended_wait (&child, wstat))
2481 {
2482 /* The event has been handled, so just return without
2483 reporting it. */
2484 return NULL;
2485 }
2486 }
2487
2488 if (linux_wstatus_maybe_breakpoint (wstat))
2489 {
2490 if (save_stop_reason (child))
2491 have_stop_pc = 1;
2492 }
2493
2494 if (!have_stop_pc)
2495 child->stop_pc = get_pc (child);
2496
2497 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2498 && child->stop_expected)
2499 {
2500 if (debug_threads)
2501 debug_printf ("Expected stop.\n");
2502 child->stop_expected = 0;
2503
2504 if (thread->last_resume_kind == resume_stop)
2505 {
2506 /* We want to report the stop to the core. Treat the
2507 SIGSTOP as a normal event. */
2508 if (debug_threads)
2509 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2510 target_pid_to_str (ptid_of (thread)));
2511 }
2512 else if (stopping_threads != NOT_STOPPING_THREADS)
2513 {
2514 /* Stopping threads. We don't want this SIGSTOP to end up
2515 pending. */
2516 if (debug_threads)
2517 debug_printf ("LLW: SIGSTOP caught for %s "
2518 "while stopping threads.\n",
2519 target_pid_to_str (ptid_of (thread)));
2520 return NULL;
2521 }
2522 else
2523 {
2524 /* This is a delayed SIGSTOP. Filter out the event. */
2525 if (debug_threads)
2526 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2527 child->stepping ? "step" : "continue",
2528 target_pid_to_str (ptid_of (thread)));
2529
2530 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2531 return NULL;
2532 }
2533 }
2534
2535 child->status_pending_p = 1;
2536 child->status_pending = wstat;
2537 return child;
2538 }
2539
2540 /* Return true if THREAD is doing hardware single step. */
2541
2542 static int
2543 maybe_hw_step (struct thread_info *thread)
2544 {
2545 if (can_hardware_single_step ())
2546 return 1;
2547 else
2548 {
2549 /* GDBserver must insert single-step breakpoint for software
2550 single step. */
2551 gdb_assert (has_single_step_breakpoints (thread));
2552 return 0;
2553 }
2554 }
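
/* Background (illustrative): with hardware single-step, a single
   PTRACE_SINGLESTEP request executes exactly one instruction and
   stops again.  Targets without it get the same effect by planting a
   breakpoint on the next instruction and resuming -- that is the
   single-step breakpoint asserted above.  A sketch of the hardware
   flavor:

     #include <errno.h>
     #include <stdint.h>
     #include <sys/ptrace.h>

     static int
     hw_single_step_sketch (pid_t pid, int signo)
     {
       errno = 0;
       // Steps one instruction, optionally delivering SIGNO.
       ptrace (PTRACE_SINGLESTEP, pid, (void *) 0,
               (void *) (uintptr_t) signo);
       return errno != 0 ? -1 : 0;
     }
*/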
2555
2556 /* Resume LWPs that are currently stopped without any pending status
2557 to report, but are resumed from the core's perspective. */
2558
2559 static void
2560 resume_stopped_resumed_lwps (thread_info *thread)
2561 {
2562 struct lwp_info *lp = get_thread_lwp (thread);
2563
2564 if (lp->stopped
2565 && !lp->suspended
2566 && !lp->status_pending_p
2567 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2568 {
2569 int step = 0;
2570
2571 if (thread->last_resume_kind == resume_step)
2572 step = maybe_hw_step (thread);
2573
2574 if (debug_threads)
2575 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2576 target_pid_to_str (ptid_of (thread)),
2577 paddress (lp->stop_pc),
2578 step);
2579
2580 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2581 }
2582 }
2583
2584 int
2585 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2586 ptid_t filter_ptid,
2587 int *wstatp, int options)
2588 {
2589 struct thread_info *event_thread;
2590 struct lwp_info *event_child, *requested_child;
2591 sigset_t block_mask, prev_mask;
2592
2593 retry:
2594 /* N.B. event_thread points to the thread_info struct that contains
2595 event_child. Keep them in sync. */
2596 event_thread = NULL;
2597 event_child = NULL;
2598 requested_child = NULL;
2599
2600 /* Check for a lwp with a pending status. */
2601
2602 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2603 {
2604 event_thread = find_thread_in_random ([&] (thread_info *thread)
2605 {
2606 return status_pending_p_callback (thread, filter_ptid);
2607 });
2608
2609 if (event_thread != NULL)
2610 event_child = get_thread_lwp (event_thread);
2611 if (debug_threads && event_thread)
2612 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2613 }
2614 else if (filter_ptid != null_ptid)
2615 {
2616 requested_child = find_lwp_pid (filter_ptid);
2617
2618 if (stopping_threads == NOT_STOPPING_THREADS
2619 && requested_child->status_pending_p
2620 && (requested_child->collecting_fast_tracepoint
2621 != fast_tpoint_collect_result::not_collecting))
2622 {
2623 enqueue_one_deferred_signal (requested_child,
2624 &requested_child->status_pending);
2625 requested_child->status_pending_p = 0;
2626 requested_child->status_pending = 0;
2627 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2628 }
2629
2630 if (requested_child->suspended
2631 && requested_child->status_pending_p)
2632 {
2633 internal_error (__FILE__, __LINE__,
2634 "requesting an event out of a"
2635 " suspended child?");
2636 }
2637
2638 if (requested_child->status_pending_p)
2639 {
2640 event_child = requested_child;
2641 event_thread = get_lwp_thread (event_child);
2642 }
2643 }
2644
2645 if (event_child != NULL)
2646 {
2647 if (debug_threads)
2648 debug_printf ("Got an event from pending child %ld (%04x)\n",
2649 lwpid_of (event_thread), event_child->status_pending);
2650 *wstatp = event_child->status_pending;
2651 event_child->status_pending_p = 0;
2652 event_child->status_pending = 0;
2653 current_thread = event_thread;
2654 return lwpid_of (event_thread);
2655 }
2656
2657 /* But if we don't find a pending event, we'll have to wait.
2658
2659 We only enter this loop if no process has a pending wait status.
2660 Thus any action taken in response to a wait status inside this
2661 loop is responding as soon as we detect the status, not after any
2662 pending events. */
2663
2664 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2665 all signals while here. */
2666 sigfillset (&block_mask);
2667 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2668
2669 /* Always pull all events out of the kernel. We'll randomly select
2670 an event LWP out of all that have events, to prevent
2671 starvation. */
2672 while (event_child == NULL)
2673 {
2674 pid_t ret = 0;
2675
2676 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2677 quirks:
2678
2679 - If the thread group leader exits while other threads in the
2680 thread group still exist, waitpid(TGID, ...) hangs. That
2681 waitpid won't return an exit status until the other threads
2682 in the group are reaped.
2683
2684 - When a non-leader thread execs, that thread just vanishes
2685 without reporting an exit (so we'd hang if we waited for it
2686 explicitly in that case). The exec event is reported to
2687 the TGID pid. */
2688 errno = 0;
2689 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2690
2691 if (debug_threads)
2692 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2693 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2694
2695 if (ret > 0)
2696 {
2697 if (debug_threads)
2698 {
2699 debug_printf ("LLW: waitpid %ld received %s\n",
2700 (long) ret, status_to_str (*wstatp));
2701 }
2702
2703 /* Filter all events. IOW, leave all events pending. We'll
2704 randomly select an event LWP out of all that have events
2705 below. */
2706 filter_event (ret, *wstatp);
2707 /* Retry until nothing comes out of waitpid. A single
2708 SIGCHLD can indicate more than one child stopped. */
2709 continue;
2710 }
2711
2712 /* Now that we've pulled all events out of the kernel, resume
2713 LWPs that don't have an interesting event to report. */
2714 if (stopping_threads == NOT_STOPPING_THREADS)
2715 for_each_thread (resume_stopped_resumed_lwps);
2716
2717 /* ... and find an LWP with a status to report to the core, if
2718 any. */
2719 event_thread = find_thread_in_random ([&] (thread_info *thread)
2720 {
2721 return status_pending_p_callback (thread, filter_ptid);
2722 });
2723
2724 if (event_thread != NULL)
2725 {
2726 event_child = get_thread_lwp (event_thread);
2727 *wstatp = event_child->status_pending;
2728 event_child->status_pending_p = 0;
2729 event_child->status_pending = 0;
2730 break;
2731 }
2732
2733 /* Check for zombie thread group leaders. Those can't be reaped
2734 until all other threads in the thread group are. */
2735 check_zombie_leaders ();
2736
2737 auto not_stopped = [&] (thread_info *thread)
2738 {
2739 return not_stopped_callback (thread, wait_ptid);
2740 };
2741
2742 /* If there are no resumed children left in the set of LWPs we
2743 want to wait for, bail. We can't just block in
2744 waitpid/sigsuspend, because lwps might have been left stopped
2745 in trace-stop state, and we'd be stuck forever waiting for
2746 their status to change (which would only happen if we resumed
2747 them). Even if WNOHANG is set, this return code is preferred
2748 over 0 (below), as it is more detailed. */
2749 if (find_thread (not_stopped) == NULL)
2750 {
2751 if (debug_threads)
2752 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2753 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2754 return -1;
2755 }
2756
2757 /* No interesting event to report to the caller. */
2758 if ((options & WNOHANG))
2759 {
2760 if (debug_threads)
2761 debug_printf ("WNOHANG set, no event found\n");
2762
2763 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2764 return 0;
2765 }
2766
2767 /* Block until we get an event reported with SIGCHLD. */
2768 if (debug_threads)
2769 debug_printf ("sigsuspend'ing\n");
2770
2771 sigsuspend (&prev_mask);
2772 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2773 goto retry;
2774 }
2775
2776 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2777
2778 current_thread = event_thread;
2779
2780 return lwpid_of (event_thread);
2781 }
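
/* Illustrative sketch of the race-free wait pattern used above: block
   SIGCHLD, drain waitpid with WNOHANG, and only then atomically
   unblock-and-sleep with sigsuspend.  A SIGCHLD that arrives between
   the drain and the sleep stays pending, so sigsuspend returns
   immediately instead of the wakeup being lost.

     #include <signal.h>
     #include <sys/wait.h>

     static pid_t
     wait_race_free_sketch (int *wstatp)
     {
       sigset_t block_mask, prev_mask;
       pid_t ret;

       sigfillset (&block_mask);
       sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

       while ((ret = waitpid (-1, wstatp, WNOHANG | __WALL)) == 0)
         sigsuspend (&prev_mask);   // sleep until some signal arrives

       sigprocmask (SIG_SETMASK, &prev_mask, NULL);
       return ret;
     }
*/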
2782
2783 int
2784 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2785 {
2786 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2787 }
2788
2789 /* Select one LWP out of those that have events pending. */
2790
2791 static void
2792 select_event_lwp (struct lwp_info **orig_lp)
2793 {
2794 struct thread_info *event_thread = NULL;
2795
2796 /* In all-stop, give preference to the LWP that is being
2797 single-stepped. There will be at most one, and it's the LWP that
2798 the core is most interested in. If we didn't do this, then we'd
2799 have to handle pending step SIGTRAPs somehow in case the core
2800 later continues the previously-stepped thread, otherwise we'd
2801 report the pending SIGTRAP, and the core, not having stepped the
2802 thread, wouldn't understand what the trap was for, and therefore
2803 would report it to the user as a random signal. */
2804 if (!non_stop)
2805 {
2806 event_thread = find_thread ([] (thread_info *thread)
2807 {
2808 lwp_info *lp = get_thread_lwp (thread);
2809
2810 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2811 && thread->last_resume_kind == resume_step
2812 && lp->status_pending_p);
2813 });
2814
2815 if (event_thread != NULL)
2816 {
2817 if (debug_threads)
2818 debug_printf ("SEL: Select single-step %s\n",
2819 target_pid_to_str (ptid_of (event_thread)));
2820 }
2821 }
2822 if (event_thread == NULL)
2823 {
2824 /* No single-stepping LWP. Select one at random, out of those
2825 which have had events. */
2826
2827 event_thread = find_thread_in_random ([&] (thread_info *thread)
2828 {
2829 lwp_info *lp = get_thread_lwp (thread);
2830
2831 /* Only resumed LWPs that have an event pending. */
2832 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2833 && lp->status_pending_p);
2834 });
2835 }
2836
2837 if (event_thread != NULL)
2838 {
2839 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2840
2841 /* Switch the event LWP. */
2842 *orig_lp = event_lp;
2843 }
2844 }
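
/* Note (illustrative): find_thread_in_random gives every eventful LWP
   the same probability of being picked, which is what prevents
   starvation.  A uniform pick over a list of unknown length can be
   done in one pass with reservoir sampling (k = 1):

     #include <stdlib.h>

     // Returns the index of one flagged element, chosen uniformly at
     // random, or -1 if none is flagged.
     static int
     reservoir_pick_sketch (const int *flagged, int n)
     {
       int chosen = -1, seen = 0;

       for (int i = 0; i < n; i++)
         if (flagged[i])
           {
             seen++;
             if (rand () % seen == 0)   // keep with probability 1/seen
               chosen = i;
           }
       return chosen;
     }
*/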
2845
2846 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2847 non-NULL. */
2848
2849 static void
2850 unsuspend_all_lwps (struct lwp_info *except)
2851 {
2852 for_each_thread ([&] (thread_info *thread)
2853 {
2854 lwp_info *lwp = get_thread_lwp (thread);
2855
2856 if (lwp != except)
2857 lwp_suspended_decr (lwp);
2858 });
2859 }
2860
2861 static bool stuck_in_jump_pad_callback (thread_info *thread);
2862 static bool lwp_running (thread_info *thread);
2863
2864 /* Stabilize threads (move out of jump pads).
2865
2866 If a thread is midway collecting a fast tracepoint, we need to
2867 finish the collection and move it out of the jump pad before
2868 reporting the signal.
2869
2870 This avoids recursion while collecting (when a signal arrives
2871 midway, and the signal handler itself collects), which would trash
2872 the trace buffer. In case the user set a breakpoint in a signal
2873 handler, this avoids the backtrace showing the jump pad, etc.
2874 Most importantly, there are certain things we can't do safely if
2875 threads are stopped in a jump pad (or in its callee's). For
2876 example:
2877
2878 - starting a new trace run. A thread still collecting the
2879 previous run could trash the trace buffer when resumed. The trace
2880 buffer control structures would have been reset but the thread had
2881 no way to tell. The thread could even be midway through memcpy'ing
2882 to the buffer, which would mean that when resumed, it would clobber
2883 the trace buffer that had been set for a new run.
2884
2885 - we can't rewrite/reuse the jump pads for new tracepoints
2886 safely. Say you do tstart while a thread is stopped midway
2887 through collecting. When the thread is later resumed, it finishes
2888 the collection and returns to the jump pad, to execute the original
2889 instruction that was under the tracepoint jump at the time the
2890 older run had been started. If the jump pad had since been
2891 rewritten for something else in the new run, the thread would now
2892 execute wrong / random instructions. */
2893
2894 void
2895 linux_process_target::stabilize_threads ()
2896 {
2897 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2898
2899 if (thread_stuck != NULL)
2900 {
2901 if (debug_threads)
2902 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2903 lwpid_of (thread_stuck));
2904 return;
2905 }
2906
2907 thread_info *saved_thread = current_thread;
2908
2909 stabilizing_threads = 1;
2910
2911 /* Kick 'em all. */
2912 for_each_thread ([this] (thread_info *thread)
2913 {
2914 move_out_of_jump_pad (thread);
2915 });
2916
2917 /* Loop until all are stopped out of the jump pads. */
2918 while (find_thread (lwp_running) != NULL)
2919 {
2920 struct target_waitstatus ourstatus;
2921 struct lwp_info *lwp;
2922 int wstat;
2923
2924 /* Note that we go through the full wait event loop. While
2925 moving threads out of the jump pad, we need to be able to step
2926 over internal breakpoints and such. */
2927 wait_1 (minus_one_ptid, &ourstatus, 0);
2928
2929 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2930 {
2931 lwp = get_thread_lwp (current_thread);
2932
2933 /* Lock it. */
2934 lwp_suspended_inc (lwp);
2935
2936 if (ourstatus.value.sig != GDB_SIGNAL_0
2937 || current_thread->last_resume_kind == resume_stop)
2938 {
2939 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2940 enqueue_one_deferred_signal (lwp, &wstat);
2941 }
2942 }
2943 }
2944
2945 unsuspend_all_lwps (NULL);
2946
2947 stabilizing_threads = 0;
2948
2949 current_thread = saved_thread;
2950
2951 if (debug_threads)
2952 {
2953 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2954
2955 if (thread_stuck != NULL)
2956 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2957 lwpid_of (thread_stuck));
2958 }
2959 }
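
/* Note (illustrative): the W_STOPCODE used above is the inverse of the
   WIFSTOPPED/WSTOPSIG decomposition -- it synthesizes a wait status
   word as if waitpid had reported a stop by the given signal.  A
   glibc-specific round-trip sketch:

     #include <assert.h>
     #include <sys/wait.h>

     static void
     w_stopcode_roundtrip_sketch (int sig)
     {
       int w = W_STOPCODE (sig);

       assert (WIFSTOPPED (w) && WSTOPSIG (w) == sig);
     }
*/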
2960
2961 /* Convenience function that is called when the kernel reports an
2962 event that is not passed out to GDB. */
2963
2964 static ptid_t
2965 ignore_event (struct target_waitstatus *ourstatus)
2966 {
2967 /* If we got an event, there may still be others, as a single
2968 SIGCHLD can indicate more than one child stopped. This forces
2969 another target_wait call. */
2970 async_file_mark ();
2971
2972 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2973 return null_ptid;
2974 }
2975
2976 /* Convenience function that is called when the kernel reports an exit
2977 event. This decides whether to report the event to GDB as a
2978 process exit event, a thread exit event, or to suppress the
2979 event. */
2980
2981 static ptid_t
2982 filter_exit_event (struct lwp_info *event_child,
2983 struct target_waitstatus *ourstatus)
2984 {
2985 client_state &cs = get_client_state ();
2986 struct thread_info *thread = get_lwp_thread (event_child);
2987 ptid_t ptid = ptid_of (thread);
2988
2989 if (!last_thread_of_process_p (pid_of (thread)))
2990 {
2991 if (cs.report_thread_events)
2992 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2993 else
2994 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2995
2996 delete_lwp (event_child);
2997 }
2998 return ptid;
2999 }
3000
3001 /* Returns 1 if GDB is interested in any event_child syscalls. */
3002
3003 static int
3004 gdb_catching_syscalls_p (struct lwp_info *event_child)
3005 {
3006 struct thread_info *thread = get_lwp_thread (event_child);
3007 struct process_info *proc = get_thread_process (thread);
3008
3009 return !proc->syscalls_to_catch.empty ();
3010 }
3011
3012 /* Returns 1 if GDB is interested in the event_child syscall.
3013 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3014
3015 static int
3016 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3017 {
3018 int sysno;
3019 struct thread_info *thread = get_lwp_thread (event_child);
3020 struct process_info *proc = get_thread_process (thread);
3021
3022 if (proc->syscalls_to_catch.empty ())
3023 return 0;
3024
3025 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3026 return 1;
3027
3028 get_syscall_trapinfo (event_child, &sysno);
3029
3030 for (int iter : proc->syscalls_to_catch)
3031 if (iter == sysno)
3032 return 1;
3033
3034 return 0;
3035 }
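
/* Background (illustrative): with PTRACE_O_TRACESYSGOOD enabled, the
   kernel or's 0x80 into the stop signal of syscall-entry/exit stops,
   so they can be told apart from genuine SIGTRAPs; SYSCALL_SIGTRAP is
   that combination.  A sketch of the test, assuming SYSCALL_SIGTRAP
   == (SIGTRAP | 0x80):

     #include <signal.h>
     #include <sys/wait.h>

     static int
     is_syscall_stop_sketch (int wstat)
     {
       return WIFSTOPPED (wstat) && WSTOPSIG (wstat) == (SIGTRAP | 0x80);
     }
*/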
3036
3037 ptid_t
3038 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3039 int target_options)
3040 {
3041 client_state &cs = get_client_state ();
3042 int w;
3043 struct lwp_info *event_child;
3044 int options;
3045 int pid;
3046 int step_over_finished;
3047 int bp_explains_trap;
3048 int maybe_internal_trap;
3049 int report_to_gdb;
3050 int trace_event;
3051 int in_step_range;
3052 int any_resumed;
3053
3054 if (debug_threads)
3055 {
3056 debug_enter ();
3057 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3058 }
3059
3060 /* Translate generic target options into linux options. */
3061 options = __WALL;
3062 if (target_options & TARGET_WNOHANG)
3063 options |= WNOHANG;
3064
3065 bp_explains_trap = 0;
3066 trace_event = 0;
3067 in_step_range = 0;
3068 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3069
3070 auto status_pending_p_any = [&] (thread_info *thread)
3071 {
3072 return status_pending_p_callback (thread, minus_one_ptid);
3073 };
3074
3075 auto not_stopped = [&] (thread_info *thread)
3076 {
3077 return not_stopped_callback (thread, minus_one_ptid);
3078 };
3079
3080 /* Find a resumed LWP, if any. */
3081 if (find_thread (status_pending_p_any) != NULL)
3082 any_resumed = 1;
3083 else if (find_thread (not_stopped) != NULL)
3084 any_resumed = 1;
3085 else
3086 any_resumed = 0;
3087
3088 if (step_over_bkpt == null_ptid)
3089 pid = wait_for_event (ptid, &w, options);
3090 else
3091 {
3092 if (debug_threads)
3093 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3094 target_pid_to_str (step_over_bkpt));
3095 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3096 }
3097
3098 if (pid == 0 || (pid == -1 && !any_resumed))
3099 {
3100 gdb_assert (target_options & TARGET_WNOHANG);
3101
3102 if (debug_threads)
3103 {
3104 debug_printf ("wait_1 ret = null_ptid, "
3105 "TARGET_WAITKIND_IGNORE\n");
3106 debug_exit ();
3107 }
3108
3109 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3110 return null_ptid;
3111 }
3112 else if (pid == -1)
3113 {
3114 if (debug_threads)
3115 {
3116 debug_printf ("wait_1 ret = null_ptid, "
3117 "TARGET_WAITKIND_NO_RESUMED\n");
3118 debug_exit ();
3119 }
3120
3121 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3122 return null_ptid;
3123 }
3124
3125 event_child = get_thread_lwp (current_thread);
3126
3127 /* wait_for_event only returns an exit status for the last
3128 child of a process. Report it. */
3129 if (WIFEXITED (w) || WIFSIGNALED (w))
3130 {
3131 if (WIFEXITED (w))
3132 {
3133 ourstatus->kind = TARGET_WAITKIND_EXITED;
3134 ourstatus->value.integer = WEXITSTATUS (w);
3135
3136 if (debug_threads)
3137 {
3138 debug_printf ("wait_1 ret = %s, exited with "
3139 "retcode %d\n",
3140 target_pid_to_str (ptid_of (current_thread)),
3141 WEXITSTATUS (w));
3142 debug_exit ();
3143 }
3144 }
3145 else
3146 {
3147 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3148 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3149
3150 if (debug_threads)
3151 {
3152 debug_printf ("wait_1 ret = %s, terminated with "
3153 "signal %d\n",
3154 target_pid_to_str (ptid_of (current_thread)),
3155 WTERMSIG (w));
3156 debug_exit ();
3157 }
3158 }
3159
3160 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3161 return filter_exit_event (event_child, ourstatus);
3162
3163 return ptid_of (current_thread);
3164 }
3165
3166 /* If a step-over executes a breakpoint instruction, then in the
3167 hardware single step case it means a gdb/gdbserver breakpoint had
3168 been planted on top of a permanent breakpoint, while in the software
3169 single step case it may just mean that gdbserver hit the reinsert
3170 breakpoint. The PC has been adjusted by save_stop_reason to point
3171 at the breakpoint address.
3172 So in the hardware single step case, advance the PC manually past
3173 the breakpoint, and in the software single step case advance only
3174 if it's not the single_step_breakpoint we are hitting.
3175 This prevents the program from trapping on a permanent breakpoint
3176 forever. */
3177 if (step_over_bkpt != null_ptid
3178 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3179 && (event_child->stepping
3180 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3181 {
3182 int increment_pc = 0;
3183 int breakpoint_kind = 0;
3184 CORE_ADDR stop_pc = event_child->stop_pc;
3185
3186 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3187 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3188
3189 if (debug_threads)
3190 {
3191 debug_printf ("step-over for %s executed software breakpoint\n",
3192 target_pid_to_str (ptid_of (current_thread)));
3193 }
3194
3195 if (increment_pc != 0)
3196 {
3197 struct regcache *regcache
3198 = get_thread_regcache (current_thread, 1);
3199
3200 event_child->stop_pc += increment_pc;
3201 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3202
3203 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3204 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3205 }
3206 }
3207
3208 /* If this event was not handled before, and is not a SIGTRAP, we
3209 report it. SIGILL and SIGSEGV are also treated as traps in case
3210 a breakpoint is inserted at the current PC. If this target does
3211 not support internal breakpoints at all, we also report the
3212 SIGTRAP without further processing; it's of no concern to us. */
3213 maybe_internal_trap
3214 = (supports_breakpoints ()
3215 && (WSTOPSIG (w) == SIGTRAP
3216 || ((WSTOPSIG (w) == SIGILL
3217 || WSTOPSIG (w) == SIGSEGV)
3218 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3219
3220 if (maybe_internal_trap)
3221 {
3222 /* Handle anything that requires bookkeeping before deciding to
3223 report the event or continue waiting. */
3224
3225 /* First check if we can explain the SIGTRAP with an internal
3226 breakpoint, or if we should possibly report the event to GDB.
3227 Do this before anything that may remove or insert a
3228 breakpoint. */
3229 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3230
3231 /* We have a SIGTRAP, possibly a step-over dance has just
3232 finished. If so, tweak the state machine accordingly,
3233 reinsert breakpoints and delete any single-step
3234 breakpoints. */
3235 step_over_finished = finish_step_over (event_child);
3236
3237 /* Now invoke the callbacks of any internal breakpoints there. */
3238 check_breakpoints (event_child->stop_pc);
3239
3240 /* Handle tracepoint data collecting. This may overflow the
3241 trace buffer, and cause a tracing stop, removing
3242 breakpoints. */
3243 trace_event = handle_tracepoints (event_child);
3244
3245 if (bp_explains_trap)
3246 {
3247 if (debug_threads)
3248 debug_printf ("Hit a gdbserver breakpoint.\n");
3249 }
3250 }
3251 else
3252 {
3253 /* We have some other signal, possibly a step-over dance was in
3254 progress, and it should be cancelled too. */
3255 step_over_finished = finish_step_over (event_child);
3256 }
3257
3258 /* We have all the data we need. Either report the event to GDB, or
3259 resume threads and keep waiting for more. */
3260
3261 /* If we're collecting a fast tracepoint, finish the collection and
3262 move out of the jump pad before delivering a signal. See
3263 linux_stabilize_threads. */
3264
3265 if (WIFSTOPPED (w)
3266 && WSTOPSIG (w) != SIGTRAP
3267 && supports_fast_tracepoints ()
3268 && agent_loaded_p ())
3269 {
3270 if (debug_threads)
3271 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3272 "to defer or adjust it.\n",
3273 WSTOPSIG (w), lwpid_of (current_thread));
3274
3275 /* Allow debugging the jump pad itself. */
3276 if (current_thread->last_resume_kind != resume_step
3277 && maybe_move_out_of_jump_pad (event_child, &w))
3278 {
3279 enqueue_one_deferred_signal (event_child, &w);
3280
3281 if (debug_threads)
3282 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3283 WSTOPSIG (w), lwpid_of (current_thread));
3284
3285 linux_resume_one_lwp (event_child, 0, 0, NULL);
3286
3287 if (debug_threads)
3288 debug_exit ();
3289 return ignore_event (ourstatus);
3290 }
3291 }
3292
3293 if (event_child->collecting_fast_tracepoint
3294 != fast_tpoint_collect_result::not_collecting)
3295 {
3296 if (debug_threads)
3297 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3298 "Check if we're already there.\n",
3299 lwpid_of (current_thread),
3300 (int) event_child->collecting_fast_tracepoint);
3301
3302 trace_event = 1;
3303
3304 event_child->collecting_fast_tracepoint
3305 = linux_fast_tracepoint_collecting (event_child, NULL);
3306
3307 if (event_child->collecting_fast_tracepoint
3308 != fast_tpoint_collect_result::before_insn)
3309 {
3310 /* No longer need this breakpoint. */
3311 if (event_child->exit_jump_pad_bkpt != NULL)
3312 {
3313 if (debug_threads)
3314 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3315 "stopping all threads momentarily.\n");
3316
3317 /* Other running threads could hit this breakpoint.
3318 We don't handle moribund locations like GDB does,
3319 instead we always pause all threads when removing
3320 breakpoints, so that any step-over or
3321 decr_pc_after_break adjustment is always taken
3322 care of while the breakpoint is still
3323 inserted. */
3324 stop_all_lwps (1, event_child);
3325
3326 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3327 event_child->exit_jump_pad_bkpt = NULL;
3328
3329 unstop_all_lwps (1, event_child);
3330
3331 gdb_assert (event_child->suspended >= 0);
3332 }
3333 }
3334
3335 if (event_child->collecting_fast_tracepoint
3336 == fast_tpoint_collect_result::not_collecting)
3337 {
3338 if (debug_threads)
3339 debug_printf ("fast tracepoint finished "
3340 "collecting successfully.\n");
3341
3342 /* We may have a deferred signal to report. */
3343 if (dequeue_one_deferred_signal (event_child, &w))
3344 {
3345 if (debug_threads)
3346 debug_printf ("dequeued one signal.\n");
3347 }
3348 else
3349 {
3350 if (debug_threads)
3351 debug_printf ("no deferred signals.\n");
3352
3353 if (stabilizing_threads)
3354 {
3355 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3356 ourstatus->value.sig = GDB_SIGNAL_0;
3357
3358 if (debug_threads)
3359 {
3360 debug_printf ("wait_1 ret = %s, stopped "
3361 "while stabilizing threads\n",
3362 target_pid_to_str (ptid_of (current_thread)));
3363 debug_exit ();
3364 }
3365
3366 return ptid_of (current_thread);
3367 }
3368 }
3369 }
3370 }
3371
3372 /* Check whether GDB would be interested in this event. */
3373
3374 /* Check if GDB is interested in this syscall. */
3375 if (WIFSTOPPED (w)
3376 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3377 && !gdb_catch_this_syscall_p (event_child))
3378 {
3379 if (debug_threads)
3380 {
3381 debug_printf ("Ignored syscall for LWP %ld.\n",
3382 lwpid_of (current_thread));
3383 }
3384
3385 linux_resume_one_lwp (event_child, event_child->stepping,
3386 0, NULL);
3387
3388 if (debug_threads)
3389 debug_exit ();
3390 return ignore_event (ourstatus);
3391 }
3392
3393 /* If GDB is not interested in this signal, don't stop other
3394 threads, and don't report it to GDB. Just resume the inferior
3395 right away. We do this for threading-related signals as well as
3396 any that GDB specifically requested we ignore. But never ignore
3397 SIGSTOP if we sent it ourselves, and do not ignore signals when
3398 stepping - they may require special handling to skip the signal
3399 handler. Also never ignore signals that could be caused by a
3400 breakpoint. */
3401 if (WIFSTOPPED (w)
3402 && current_thread->last_resume_kind != resume_step
3403 && (
3404 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3405 (current_process ()->priv->thread_db != NULL
3406 && (WSTOPSIG (w) == __SIGRTMIN
3407 || WSTOPSIG (w) == __SIGRTMIN + 1))
3408 ||
3409 #endif
3410 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3411 && !(WSTOPSIG (w) == SIGSTOP
3412 && current_thread->last_resume_kind == resume_stop)
3413 && !linux_wstatus_maybe_breakpoint (w))))
3414 {
3415 siginfo_t info, *info_p;
3416
3417 if (debug_threads)
3418 debug_printf ("Ignored signal %d for LWP %ld.\n",
3419 WSTOPSIG (w), lwpid_of (current_thread));
3420
3421 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3422 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3423 info_p = &info;
3424 else
3425 info_p = NULL;
3426
3427 if (step_over_finished)
3428 {
3429 /* We cancelled this thread's step-over above. We still
3430 need to unsuspend all other LWPs, and set them back
3431 running again while the signal handler runs. */
3432 unsuspend_all_lwps (event_child);
3433
3434 /* Enqueue the pending signal info so that proceed_all_lwps
3435 doesn't lose it. */
3436 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3437
3438 proceed_all_lwps ();
3439 }
3440 else
3441 {
3442 linux_resume_one_lwp (event_child, event_child->stepping,
3443 WSTOPSIG (w), info_p);
3444 }
3445
3446 if (debug_threads)
3447 debug_exit ();
3448
3449 return ignore_event (ourstatus);
3450 }
3451
3452 /* Note that all addresses are always "out of the step range" when
3453 there's no range to begin with. */
3454 in_step_range = lwp_in_step_range (event_child);
3455
3456 /* If GDB wanted this thread to single step, and the thread is out
3457 of the step range, we always want to report the SIGTRAP, and let
3458 GDB handle it. Watchpoints should always be reported. So should
3459 signals we can't explain. A SIGTRAP we can't explain could be a
3460 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3461 do, we'll be able to handle GDB breakpoints on top of internal
3462 breakpoints, by handling the internal breakpoint and still
3463 reporting the event to GDB. If we don't, we're out of luck, GDB
3464 won't see the breakpoint hit. If we see a single-step event but
3465 the thread should be continuing, don't pass the trap to gdb.
3466 That indicates that we had previously finished a single-step but
3467 left the single-step pending -- see
3468 complete_ongoing_step_over. */
3469 report_to_gdb = (!maybe_internal_trap
3470 || (current_thread->last_resume_kind == resume_step
3471 && !in_step_range)
3472 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3473 || (!in_step_range
3474 && !bp_explains_trap
3475 && !trace_event
3476 && !step_over_finished
3477 && !(current_thread->last_resume_kind == resume_continue
3478 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3479 || (gdb_breakpoint_here (event_child->stop_pc)
3480 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3481 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3482 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3483
3484 run_breakpoint_commands (event_child->stop_pc);
3485
3486 /* We found no reason GDB would want us to stop. We either hit one
3487 of our own breakpoints, or finished an internal step GDB
3488 shouldn't know about. */
3489 if (!report_to_gdb)
3490 {
3491 if (debug_threads)
3492 {
3493 if (bp_explains_trap)
3494 debug_printf ("Hit a gdbserver breakpoint.\n");
3495 if (step_over_finished)
3496 debug_printf ("Step-over finished.\n");
3497 if (trace_event)
3498 debug_printf ("Tracepoint event.\n");
3499 if (lwp_in_step_range (event_child))
3500 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3501 paddress (event_child->stop_pc),
3502 paddress (event_child->step_range_start),
3503 paddress (event_child->step_range_end));
3504 }
3505
3506 /* We're not reporting this breakpoint to GDB, so apply the
3507 decr_pc_after_break adjustment to the inferior's regcache
3508 ourselves. */
3509
3510 if (the_low_target.set_pc != NULL)
3511 {
3512 struct regcache *regcache
3513 = get_thread_regcache (current_thread, 1);
3514 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3515 }
3516
3517 if (step_over_finished)
3518 {
3519 /* If we have finished stepping over a breakpoint, we've
3520 stopped and suspended all LWPs momentarily except the
3521 stepping one. This is where we resume them all again.
3522 We're going to keep waiting, so use proceed, which
3523 handles stepping over the next breakpoint. */
3524 unsuspend_all_lwps (event_child);
3525 }
3526 else
3527 {
3528 /* Remove the single-step breakpoints if any. Note that
3529 there isn't single-step breakpoint if we finished stepping
3530 over. */
3531 if (can_software_single_step ()
3532 && has_single_step_breakpoints (current_thread))
3533 {
3534 stop_all_lwps (0, event_child);
3535 delete_single_step_breakpoints (current_thread);
3536 unstop_all_lwps (0, event_child);
3537 }
3538 }
3539
3540 if (debug_threads)
3541 debug_printf ("proceeding all threads.\n");
3542 proceed_all_lwps ();
3543
3544 if (debug_threads)
3545 debug_exit ();
3546
3547 return ignore_event (ourstatus);
3548 }
3549
3550 if (debug_threads)
3551 {
3552 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3553 {
3554 std::string str
3555 = target_waitstatus_to_string (&event_child->waitstatus);
3556
3557 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3558 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3559 }
3560 if (current_thread->last_resume_kind == resume_step)
3561 {
3562 if (event_child->step_range_start == event_child->step_range_end)
3563 debug_printf ("GDB wanted to single-step, reporting event.\n");
3564 else if (!lwp_in_step_range (event_child))
3565 debug_printf ("Out of step range, reporting event.\n");
3566 }
3567 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3568 debug_printf ("Stopped by watchpoint.\n");
3569 else if (gdb_breakpoint_here (event_child->stop_pc))
3570 debug_printf ("Stopped by GDB breakpoint.\n");
3571 debug_printf ("Hit a non-gdbserver trap event.\n");
3573 }
3574
3575 /* Alright, we're going to report a stop. */
3576
3577 /* Remove single-step breakpoints. */
3578 if (can_software_single_step ())
3579 {
3580 /* Decide whether to remove single-step breakpoints. If we do, stop
3581 all lwps first, so that other threads won't hit a breakpoint in
3582 stale memory. */
3583 int remove_single_step_breakpoints_p = 0;
3584
3585 if (non_stop)
3586 {
3587 remove_single_step_breakpoints_p
3588 = has_single_step_breakpoints (current_thread);
3589 }
3590 else
3591 {
3592 /* In all-stop, a stop reply cancels all previous resume
3593 requests. Delete all single-step breakpoints. */
3594
3595 find_thread ([&] (thread_info *thread) {
3596 if (has_single_step_breakpoints (thread))
3597 {
3598 remove_single_step_breakpoints_p = 1;
3599 return true;
3600 }
3601
3602 return false;
3603 });
3604 }
3605
3606 if (remove_single_step_breakpoints_p)
3607 {
3608 /* If we remove single-step breakpoints from memory, stop all lwps,
3609 so that other threads won't hit the breakpoint in stale
3610 memory. */
3611 stop_all_lwps (0, event_child);
3612
3613 if (non_stop)
3614 {
3615 gdb_assert (has_single_step_breakpoints (current_thread));
3616 delete_single_step_breakpoints (current_thread);
3617 }
3618 else
3619 {
3620 for_each_thread ([] (thread_info *thread){
3621 if (has_single_step_breakpoints (thread))
3622 delete_single_step_breakpoints (thread);
3623 });
3624 }
3625
3626 unstop_all_lwps (0, event_child);
3627 }
3628 }
3629
3630 if (!stabilizing_threads)
3631 {
3632 /* In all-stop, stop all threads. */
3633 if (!non_stop)
3634 stop_all_lwps (0, NULL);
3635
3636 if (step_over_finished)
3637 {
3638 if (!non_stop)
3639 {
3640 /* If we were doing a step-over, all other threads but
3641 the stepping one had been paused in start_step_over,
3642 with their suspend counts incremented. We don't want
3643 to do a full unstop/unpause, because we're in
3644 all-stop mode (so we want threads stopped), but we
3645 still need to unsuspend the other threads, to
3646 decrement their `suspended' count back. */
3647 unsuspend_all_lwps (event_child);
3648 }
3649 else
3650 {
3651 /* If we just finished a step-over, then all threads had
3652 been momentarily paused. In all-stop, that's fine,
3653 we want threads stopped by now anyway. In non-stop,
3654 we need to re-resume threads that GDB wanted to be
3655 running. */
3656 unstop_all_lwps (1, event_child);
3657 }
3658 }
3659
3660 /* If we're not waiting for a specific LWP, choose an event LWP
3661 from among those that have had events. Giving equal priority
3662 to all LWPs that have had events helps prevent
3663 starvation. */
3664 if (ptid == minus_one_ptid)
3665 {
3666 event_child->status_pending_p = 1;
3667 event_child->status_pending = w;
3668
3669 select_event_lwp (&event_child);
3670
3671 /* current_thread and event_child must stay in sync. */
3672 current_thread = get_lwp_thread (event_child);
3673
3674 event_child->status_pending_p = 0;
3675 w = event_child->status_pending;
3676 }
3677
3678
3679 /* Stabilize threads (move out of jump pads). */
3680 if (!non_stop)
3681 target_stabilize_threads ();
3682 }
3683 else
3684 {
3685 /* If we just finished a step-over, then all threads had been
3686 momentarily paused. In all-stop, that's fine, we want
3687 threads stopped by now anyway. In non-stop, we need to
3688 re-resume threads that GDB wanted to be running. */
3689 if (step_over_finished)
3690 unstop_all_lwps (1, event_child);
3691 }
3692
3693 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3694 {
3695 /* If the reported event is an exit, fork, vfork or exec, let
3696 GDB know. */
3697
3698 /* Break the unreported fork relationship chain. */
3699 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3700 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3701 {
3702 event_child->fork_relative->fork_relative = NULL;
3703 event_child->fork_relative = NULL;
3704 }
3705
3706 *ourstatus = event_child->waitstatus;
3707 /* Clear the event lwp's waitstatus since we handled it already. */
3708 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3709 }
3710 else
3711 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3712
3713 /* Now that we've selected our final event LWP, un-adjust its PC if
3714 it was a software breakpoint, and the client doesn't know we can
3715 adjust the breakpoint ourselves. */
3716 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3717 && !cs.swbreak_feature)
3718 {
3719 int decr_pc = the_low_target.decr_pc_after_break;
3720
3721 if (decr_pc != 0)
3722 {
3723 struct regcache *regcache
3724 = get_thread_regcache (current_thread, 1);
3725 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3726 }
3727 }
3728
3729 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3730 {
3731 get_syscall_trapinfo (event_child,
3732 &ourstatus->value.syscall_number);
3733 ourstatus->kind = event_child->syscall_state;
3734 }
3735 else if (current_thread->last_resume_kind == resume_stop
3736 && WSTOPSIG (w) == SIGSTOP)
3737 {
3738 /* A thread that GDB requested to stop with vCont;t stopped
3739 cleanly, so report it as SIG0. The use of SIGSTOP is an
3740 implementation detail. */
3741 ourstatus->value.sig = GDB_SIGNAL_0;
3742 }
3743 else if (current_thread->last_resume_kind == resume_stop
3744 && WSTOPSIG (w) != SIGSTOP)
3745 {
3746 /* A thread that GDB requested to stop with vCont;t, but which
3747 stopped for some other reason. */
3748 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3749 }
3750 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3751 {
3752 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3753 }
3754
3755 gdb_assert (step_over_bkpt == null_ptid);
3756
3757 if (debug_threads)
3758 {
3759 debug_printf ("wait_1 ret = %s, %d, %d\n",
3760 target_pid_to_str (ptid_of (current_thread)),
3761 ourstatus->kind, ourstatus->value.sig);
3762 debug_exit ();
3763 }
3764
3765 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3766 return filter_exit_event (event_child, ourstatus);
3767
3768 return ptid_of (current_thread);
3769 }
3770
3771 /* Get rid of any pending event in the pipe. */
3772 static void
3773 async_file_flush (void)
3774 {
3775 int ret;
3776 char buf;
3777
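/* Note: this assumes the event pipe was made non-blocking when
   async mode was enabled; once the pipe is drained, read returns
   -1 with EAGAIN and the loop below exits. */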
3778 do
3779 ret = read (linux_event_pipe[0], &buf, 1);
3780 while (ret >= 0 || (ret == -1 && errno == EINTR));
3781 }
3782
3783 /* Put something in the pipe, so the event loop wakes up. */
3784 static void
3785 async_file_mark (void)
3786 {
3787 int ret;
3788
3789 async_file_flush ();
3790
3791 do
3792 ret = write (linux_event_pipe[1], "+", 1);
3793 while (ret == 0 || (ret == -1 && errno == EINTR));
3794
3795 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3796 be awakened anyway. */
3797 }
3798
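/* Implementation of the wait target op. Wrap wait_1, looping until
   a meaningful event is seen unless TARGET_WNOHANG was requested,
   and re-mark the async event pipe while more events may remain. */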
3799 ptid_t
3800 linux_process_target::wait (ptid_t ptid,
3801 target_waitstatus *ourstatus,
3802 int target_options)
3803 {
3804 ptid_t event_ptid;
3805
3806 /* Flush the async file first. */
3807 if (target_is_async_p ())
3808 async_file_flush ();
3809
3810 do
3811 {
3812 event_ptid = wait_1 (ptid, ourstatus, target_options);
3813 }
3814 while ((target_options & TARGET_WNOHANG) == 0
3815 && event_ptid == null_ptid
3816 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3817
3818 /* If at least one stop was reported, there may be more. A single
3819 SIGCHLD can signal more than one child stop. */
3820 if (target_is_async_p ()
3821 && (target_options & TARGET_WNOHANG) != 0
3822 && event_ptid != null_ptid)
3823 async_file_mark ();
3824
3825 return event_ptid;
3826 }
3827
3828 /* Send a signal to an LWP. */
3829
3830 static int
3831 kill_lwp (unsigned long lwpid, int signo)
3832 {
3833 int ret;
3834
3835 errno = 0;
3836 ret = syscall (__NR_tkill, lwpid, signo);
3837 if (errno == ENOSYS)
3838 {
3839 /* If tkill fails, then we are not using nptl threads, a
3840 configuration we no longer support. */
3841 perror_with_name (("tkill"));
3842 }
3843 return ret;
3844 }
3845
3846 void
3847 linux_stop_lwp (struct lwp_info *lwp)
3848 {
3849 send_sigstop (lwp);
3850 }
3851
3852 static void
3853 send_sigstop (struct lwp_info *lwp)
3854 {
3855 int pid;
3856
3857 pid = lwpid_of (get_lwp_thread (lwp));
3858
3859 /* If we already have a pending stop signal for this LWP, don't
3860 send another. */
3861 if (lwp->stop_expected)
3862 {
3863 if (debug_threads)
3864 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3865
3866 return;
3867 }
3868
3869 if (debug_threads)
3870 debug_printf ("Sending sigstop to lwp %d\n", pid);
3871
3872 lwp->stop_expected = 1;
3873 kill_lwp (pid, SIGSTOP);
3874 }
3875
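/* Callback for stop_all_lwps. Send a SIGSTOP to the LWP behind
   THREAD, unless that LWP is EXCEPT or is already stopped. */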
3876 static void
3877 send_sigstop (thread_info *thread, lwp_info *except)
3878 {
3879 struct lwp_info *lwp = get_thread_lwp (thread);
3880
3881 /* Ignore EXCEPT. */
3882 if (lwp == except)
3883 return;
3884
3885 if (lwp->stopped)
3886 return;
3887
3888 send_sigstop (lwp);
3889 }
3890
3891 /* Increment the suspend count of an LWP, and stop it, if not stopped
3892 yet. */
3893 static void
3894 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3895 {
3896 struct lwp_info *lwp = get_thread_lwp (thread);
3897
3898 /* Ignore EXCEPT. */
3899 if (lwp == except)
3900 return;
3901
3902 lwp_suspended_inc (lwp);
3903
3904 send_sigstop (thread, except);
3905 }
3906
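/* Record that LWP exited or was killed with wait status WSTAT, so
   that the exit event can be reported later, and mark the LWP
   stopped so that no further stop attempts are made on it. */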
3907 static void
3908 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3909 {
3910 /* Store the exit status for later. */
3911 lwp->status_pending_p = 1;
3912 lwp->status_pending = wstat;
3913
3914 /* Store in waitstatus as well, as there's nothing else to process
3915 for this event. */
3916 if (WIFEXITED (wstat))
3917 {
3918 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3919 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3920 }
3921 else if (WIFSIGNALED (wstat))
3922 {
3923 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3924 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3925 }
3926
3927 /* Prevent trying to stop it. */
3928 lwp->stopped = 1;
3929
3930 /* No further stops are expected from a dead lwp. */
3931 lwp->stop_expected = 0;
3932 }
3933
3934 /* Return true if LWP has exited already, and has a pending exit event
3935 to report to GDB. */
3936
3937 static int
3938 lwp_is_marked_dead (struct lwp_info *lwp)
3939 {
3940 return (lwp->status_pending_p
3941 && (WIFEXITED (lwp->status_pending)
3942 || WIFSIGNALED (lwp->status_pending)));
3943 }
3944
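/* Wait until all LWPs stopped by stop_all_lwps have reported their
   SIGSTOP, leaving all other events pending, and restore the
   previously current thread if it is still alive. */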
3945 void
3946 linux_process_target::wait_for_sigstop ()
3947 {
3948 struct thread_info *saved_thread;
3949 ptid_t saved_tid;
3950 int wstat;
3951 int ret;
3952
3953 saved_thread = current_thread;
3954 if (saved_thread != NULL)
3955 saved_tid = saved_thread->id;
3956 else
3957 saved_tid = null_ptid; /* avoid bogus unused warning */
3958
3959 if (debug_threads)
3960 debug_printf ("wait_for_sigstop: pulling events\n");
3961
3962 /* Passing NULL_PTID as filter indicates we want all events to be
3963 left pending. Eventually this returns when there are no
3964 unwaited-for children left. */
3965 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3966 gdb_assert (ret == -1);
3967
3968 if (saved_thread == NULL || mythread_alive (saved_tid))
3969 current_thread = saved_thread;
3970 else
3971 {
3972 if (debug_threads)
3973 debug_printf ("Previously current thread died.\n");
3974
3975 /* We can't change the current inferior behind GDB's back,
3976 otherwise, a subsequent command may apply to the wrong
3977 process. */
3978 current_thread = NULL;
3979 }
3980 }
3981
3982 /* Returns true if THREAD is stopped in a jump pad, and we can't
3983 move it out, because we need to report the stop event to GDB. For
3984 example, if the user puts a breakpoint in the jump pad, it's
3985 because she wants to debug it. */
3986
3987 static bool
3988 stuck_in_jump_pad_callback (thread_info *thread)
3989 {
3990 struct lwp_info *lwp = get_thread_lwp (thread);
3991
3992 if (lwp->suspended != 0)
3993 {
3994 internal_error (__FILE__, __LINE__,
3995 "LWP %ld is suspended, suspended=%d\n",
3996 lwpid_of (thread), lwp->suspended);
3997 }
3998 gdb_assert (lwp->stopped);
3999
4000 /* Allow debugging the jump pad, gdb_collect, etc. */
4001 return (supports_fast_tracepoints ()
4002 && agent_loaded_p ()
4003 && (gdb_breakpoint_here (lwp->stop_pc)
4004 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4005 || thread->last_resume_kind == resume_step)
4006 && (linux_fast_tracepoint_collecting (lwp, NULL)
4007 != fast_tpoint_collect_result::not_collecting));
4008 }
4009
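/* If THREAD's LWP is stopped within a fast tracepoint jump pad and
   its stop does not need to be reported to GDB, defer any pending
   signal and resume the LWP so it can move out of the jump pad;
   otherwise, leave it suspended so it can be stabilized later. */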
4010 void
4011 linux_process_target::move_out_of_jump_pad (thread_info *thread)
4012 {
4013 struct thread_info *saved_thread;
4014 struct lwp_info *lwp = get_thread_lwp (thread);
4015 int *wstat;
4016
4017 if (lwp->suspended != 0)
4018 {
4019 internal_error (__FILE__, __LINE__,
4020 "LWP %ld is suspended, suspended=%d\n",
4021 lwpid_of (thread), lwp->suspended);
4022 }
4023 gdb_assert (lwp->stopped);
4024
4025 /* For gdb_breakpoint_here. */
4026 saved_thread = current_thread;
4027 current_thread = thread;
4028
4029 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4030
4031 /* Allow debugging the jump pad, gdb_collect, etc. */
4032 if (!gdb_breakpoint_here (lwp->stop_pc)
4033 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4034 && thread->last_resume_kind != resume_step
4035 && maybe_move_out_of_jump_pad (lwp, wstat))
4036 {
4037 if (debug_threads)
4038 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4039 lwpid_of (thread));
4040
4041 if (wstat)
4042 {
4043 lwp->status_pending_p = 0;
4044 enqueue_one_deferred_signal (lwp, wstat);
4045
4046 if (debug_threads)
4047 debug_printf ("Signal %d for LWP %ld deferred "
4048 "(in jump pad)\n",
4049 WSTOPSIG (*wstat), lwpid_of (thread));
4050 }
4051
4052 linux_resume_one_lwp (lwp, 0, 0, NULL);
4053 }
4054 else
4055 lwp_suspended_inc (lwp);
4056
4057 current_thread = saved_thread;
4058 }
4059
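/* find_thread callback. Return true if the LWP behind THREAD is
   alive and currently running. */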
4060 static bool
4061 lwp_running (thread_info *thread)
4062 {
4063 struct lwp_info *lwp = get_thread_lwp (thread);
4064
4065 if (lwp_is_marked_dead (lwp))
4066 return false;
4067
4068 return !lwp->stopped;
4069 }
4070
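/* Send SIGSTOP to all LWPs except EXCEPT (if not NULL) and wait for
   them to stop; if SUSPEND is nonzero, also increment their suspend
   counts. Must not be called recursively. */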
4071 void
4072 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4073 {
4074 /* Should not be called recursively. */
4075 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4076
4077 if (debug_threads)
4078 {
4079 debug_enter ();
4080 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4081 suspend ? "stop-and-suspend" : "stop",
4082 except != NULL
4083 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4084 : "none");
4085 }
4086
4087 stopping_threads = (suspend
4088 ? STOPPING_AND_SUSPENDING_THREADS
4089 : STOPPING_THREADS);
4090
4091 if (suspend)
4092 for_each_thread ([&] (thread_info *thread)
4093 {
4094 suspend_and_send_sigstop (thread, except);
4095 });
4096 else
4097 for_each_thread ([&] (thread_info *thread)
4098 {
4099 send_sigstop (thread, except);
4100 });
4101
4102 wait_for_sigstop ();
4103 stopping_threads = NOT_STOPPING_THREADS;
4104
4105 if (debug_threads)
4106 {
4107 debug_printf ("stop_all_lwps done, setting stopping_threads "
4108 "back to !stopping\n");
4109 debug_exit ();
4110 }
4111 }
4112
4113 /* Enqueue one signal in the chain of signals which need to be
4114 delivered to this process on next resume. */
4115
4116 static void
4117 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4118 {
4119 struct pending_signals *p_sig = XNEW (struct pending_signals);
4120
4121 p_sig->prev = lwp->pending_signals;
4122 p_sig->signal = signal;
4123 if (info == NULL)
4124 memset (&p_sig->info, 0, sizeof (siginfo_t));
4125 else
4126 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4127 lwp->pending_signals = p_sig;
4128 }
4129
4130 /* Install breakpoints for software single stepping. */
4131
4132 static void
4133 install_software_single_step_breakpoints (struct lwp_info *lwp)
4134 {
4135 struct thread_info *thread = get_lwp_thread (lwp);
4136 struct regcache *regcache = get_thread_regcache (thread, 1);
4137
4138 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4139
4140 current_thread = thread;
4141 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4142
4143 for (CORE_ADDR pc : next_pcs)
4144 set_single_step_breakpoint (pc, current_ptid);
4145 }
4146
4147 /* Single-step LWP via hardware or software single-stepping.
4148 Return 1 if hardware single-stepping, 0 if software single-stepping
4149 or if single-stepping is not supported. */
4150
4151 static int
4152 single_step (struct lwp_info* lwp)
4153 {
4154 int step = 0;
4155
4156 if (can_hardware_single_step ())
4157 {
4158 step = 1;
4159 }
4160 else if (can_software_single_step ())
4161 {
4162 install_software_single_step_breakpoints (lwp);
4163 step = 0;
4164 }
4165 else
4166 {
4167 if (debug_threads)
4168 debug_printf ("stepping is not implemented on this target\n");
4169 }
4170
4171 return step;
4172 }
4173
4174 /* The signal can be delivered to the inferior if we are not trying to
4175 finish a fast tracepoint collect. Since a signal can be delivered
4176 during the step-over, the program may enter the signal handler and
4177 trap again after returning from it. We can live with the spurious
4178 double traps. */
4179
4180 static int
4181 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4182 {
4183 return (lwp->collecting_fast_tracepoint
4184 == fast_tpoint_collect_result::not_collecting);
4185 }
4186
4187 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4188 SIGNAL is nonzero, give it that signal. */
4189
4190 static void
4191 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4192 int step, int signal, siginfo_t *info)
4193 {
4194 struct thread_info *thread = get_lwp_thread (lwp);
4195 struct thread_info *saved_thread;
4196 int ptrace_request;
4197 struct process_info *proc = get_thread_process (thread);
4198
4199 /* Note that the target description may not be initialized
4200 (proc->tdesc == NULL) at this point, because the program hasn't
4201 stopped at its first instruction yet; this is the case while
4202 GDBserver skips the extra traps from the wrapper program (see
4203 option --wrapper). Code in this function that requires register
4204 access should be guarded by a proc->tdesc == NULL check or similar. */
4205
4206 if (lwp->stopped == 0)
4207 return;
4208
4209 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4210
4211 fast_tpoint_collect_result fast_tp_collecting
4212 = lwp->collecting_fast_tracepoint;
4213
4214 gdb_assert (!stabilizing_threads
4215 || (fast_tp_collecting
4216 != fast_tpoint_collect_result::not_collecting));
4217
4218 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4219 user used the "jump" command, or "set $pc = foo"). */
4220 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4221 {
4222 /* Collecting 'while-stepping' actions doesn't make sense
4223 anymore. */
4224 release_while_stepping_state_list (thread);
4225 }
4226
4227 /* If we have pending signals or status, and a new signal, enqueue the
4228 signal. Also enqueue the signal if it can't be delivered to the
4229 inferior right now. */
4230 if (signal != 0
4231 && (lwp->status_pending_p
4232 || lwp->pending_signals != NULL
4233 || !lwp_signal_can_be_delivered (lwp)))
4234 {
4235 enqueue_pending_signal (lwp, signal, info);
4236
4237 /* Postpone any pending signal. It was enqueued above. */
4238 signal = 0;
4239 }
4240
4241 if (lwp->status_pending_p)
4242 {
4243 if (debug_threads)
4244 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4245 " has pending status\n",
4246 lwpid_of (thread), step ? "step" : "continue",
4247 lwp->stop_expected ? "expected" : "not expected");
4248 return;
4249 }
4250
4251 saved_thread = current_thread;
4252 current_thread = thread;
4253
4254 /* This bit needs some thinking about. If we get a signal that
4255 we must report while a single-step reinsert is still pending,
4256 we often end up resuming the thread. It might be better to
4257 (ew) allow a stack of pending events; then we could be sure that
4258 the reinsert happened right away and not lose any signals.
4259
4260 Making this stack would also shrink the window in which breakpoints are
4261 uninserted (see comment in linux_wait_for_lwp) but not enough for
4262 complete correctness, so it won't solve that problem. It may be
4263 worthwhile just to solve this one, however. */
4264 if (lwp->bp_reinsert != 0)
4265 {
4266 if (debug_threads)
4267 debug_printf (" pending reinsert at 0x%s\n",
4268 paddress (lwp->bp_reinsert));
4269
4270 if (can_hardware_single_step ())
4271 {
4272 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4273 {
4274 if (step == 0)
4275 warning ("BAD - reinserting but not stepping.");
4276 if (lwp->suspended)
4277 warning ("BAD - reinserting and suspended(%d).",
4278 lwp->suspended);
4279 }
4280 }
4281
4282 step = maybe_hw_step (thread);
4283 }
4284
4285 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4286 {
4287 if (debug_threads)
4288 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4289 " (exit-jump-pad-bkpt)\n",
4290 lwpid_of (thread));
4291 }
4292 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4293 {
4294 if (debug_threads)
4295 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4296 " single-stepping\n",
4297 lwpid_of (thread));
4298
4299 if (can_hardware_single_step ())
4300 step = 1;
4301 else
4302 {
4303 internal_error (__FILE__, __LINE__,
4304 "moving out of jump pad single-stepping"
4305 " not implemented on this target");
4306 }
4307 }
4308
4309 /* If we have while-stepping actions in this thread set it stepping.
4310 If we have a signal to deliver, it may or may not be set to
4311 SIG_IGN, we don't know. Assume so, and allow collecting
4312 while-stepping into a signal handler. A possible smart thing to
4313 do would be to set an internal breakpoint at the signal return
4314 address, continue, and carry on catching this while-stepping
4315 action only when that breakpoint is hit. A future
4316 enhancement. */
4317 if (thread->while_stepping != NULL)
4318 {
4319 if (debug_threads)
4320 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4321 lwpid_of (thread));
4322
4323 step = single_step (lwp);
4324 }
4325
4326 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4327 {
4328 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4329
4330 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4331
4332 if (debug_threads)
4333 {
4334 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4335 (long) lwp->stop_pc);
4336 }
4337 }
4338
4339 /* If we have pending signals, consume one if it can be delivered to
4340 the inferior. */
4341 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4342 {
4343 struct pending_signals **p_sig;
4344
4345 p_sig = &lwp->pending_signals;
4346 while ((*p_sig)->prev != NULL)
4347 p_sig = &(*p_sig)->prev;
4348
4349 signal = (*p_sig)->signal;
4350 if ((*p_sig)->info.si_signo != 0)
4351 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4352 &(*p_sig)->info);
4353
4354 free (*p_sig);
4355 *p_sig = NULL;
4356 }
4357
4358 if (debug_threads)
4359 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4360 lwpid_of (thread), step ? "step" : "continue", signal,
4361 lwp->stop_expected ? "expected" : "not expected");
4362
4363 if (the_low_target.prepare_to_resume != NULL)
4364 the_low_target.prepare_to_resume (lwp);
4365
4366 regcache_invalidate_thread (thread);
4367 errno = 0;
4368 lwp->stepping = step;
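/* Pick the ptrace request to resume with: single-step, stop at the
   next syscall entry/exit if GDB is catching syscalls in this
   process, or plain continue. */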
4369 if (step)
4370 ptrace_request = PTRACE_SINGLESTEP;
4371 else if (gdb_catching_syscalls_p (lwp))
4372 ptrace_request = PTRACE_SYSCALL;
4373 else
4374 ptrace_request = PTRACE_CONT;
4375 ptrace (ptrace_request,
4376 lwpid_of (thread),
4377 (PTRACE_TYPE_ARG3) 0,
4378 /* Coerce to a uintptr_t first to avoid potential gcc warning
4379 of coercing an 8 byte integer to a 4 byte pointer. */
4380 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4381
4382 current_thread = saved_thread;
4383 if (errno)
4384 perror_with_name ("resuming thread");
4385
4386 /* Successfully resumed. Clear state that no longer makes sense,
4387 and mark the LWP as running. Must not do this before resuming
4388 otherwise if that fails other code will be confused. E.g., we'd
4389 later try to stop the LWP and hang forever waiting for a stop
4390 status. Note that we must not throw after this is cleared,
4391 otherwise handle_zombie_lwp_error would get confused. */
4392 lwp->stopped = 0;
4393 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4394 }
4395
4396 /* Called when we try to resume a stopped LWP and that errors out. If
4397 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4398 or about to become one), discard the error, clear any pending status
4399 the LWP may have, and return true (we'll collect the exit status
4400 soon enough). Otherwise, return false. */
4401
4402 static int
4403 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4404 {
4405 struct thread_info *thread = get_lwp_thread (lp);
4406
4407 /* If we get an error after resuming the LWP successfully, we'd
4408 confuse !T state for the LWP being gone. */
4409 gdb_assert (lp->stopped);
4410
4411 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4412 because even if ptrace failed with ESRCH, the tracee may be "not
4413 yet fully dead", but already refusing ptrace requests. In that
4414 case the tracee has 'R (Running)' state for a little bit
4415 (observed in Linux 3.18). See also the note on ESRCH in the
4416 ptrace(2) man page. Instead, check whether the LWP has any state
4417 other than ptrace-stopped. */
4418
4419 /* Don't assume anything if /proc/PID/status can't be read. */
4420 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4421 {
4422 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4423 lp->status_pending_p = 0;
4424 return 1;
4425 }
4426 return 0;
4427 }
4428
4429 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4430 disappears while we try to resume it. */
4431
4432 static void
4433 linux_resume_one_lwp (struct lwp_info *lwp,
4434 int step, int signal, siginfo_t *info)
4435 {
4436 try
4437 {
4438 linux_resume_one_lwp_throw (lwp, step, signal, info);
4439 }
4440 catch (const gdb_exception_error &ex)
4441 {
4442 if (!check_ptrace_stopped_lwp_gone (lwp))
4443 throw;
4444 }
4445 }
4446
4447 /* This function is called once per thread via for_each_thread.
4448 We look up which resume request applies to THREAD and mark it with a
4449 pointer to the appropriate resume request.
4450
4451 This algorithm is O(threads * resume elements), but resume elements
4452 is small (and will remain small at least until GDB supports thread
4453 suspension). */
4454
4455 static void
4456 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4457 {
4458 struct lwp_info *lwp = get_thread_lwp (thread);
4459
4460 for (size_t ndx = 0; ndx < n; ndx++)
4461 {
4462 ptid_t ptid = resume[ndx].thread;
4463 if (ptid == minus_one_ptid
4464 || ptid == thread->id
4465 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4466 of PID'. */
4467 || (ptid.pid () == pid_of (thread)
4468 && (ptid.is_pid ()
4469 || ptid.lwp () == -1)))
4470 {
4471 if (resume[ndx].kind == resume_stop
4472 && thread->last_resume_kind == resume_stop)
4473 {
4474 if (debug_threads)
4475 debug_printf ("already %s LWP %ld at GDB's request\n",
4476 (thread->last_status.kind
4477 == TARGET_WAITKIND_STOPPED)
4478 ? "stopped"
4479 : "stopping",
4480 lwpid_of (thread));
4481
4482 continue;
4483 }
4484
4485 /* Ignore (wildcard) resume requests for already-resumed
4486 threads. */
4487 if (resume[ndx].kind != resume_stop
4488 && thread->last_resume_kind != resume_stop)
4489 {
4490 if (debug_threads)
4491 debug_printf ("already %s LWP %ld at GDB's request\n",
4492 (thread->last_resume_kind
4493 == resume_step)
4494 ? "stepping"
4495 : "continuing",
4496 lwpid_of (thread));
4497 continue;
4498 }
4499
4500 /* Don't let wildcard resumes resume fork children that GDB
4501 does not yet know are new fork children. */
4502 if (lwp->fork_relative != NULL)
4503 {
4504 struct lwp_info *rel = lwp->fork_relative;
4505
4506 if (rel->status_pending_p
4507 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4508 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4509 {
4510 if (debug_threads)
4511 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4512 lwpid_of (thread));
4513 continue;
4514 }
4515 }
4516
4517 /* If the thread has a pending event that has already been
4518 reported to GDBserver core, but GDB has not pulled the
4519 event out of the vStopped queue yet, likewise, ignore the
4520 (wildcard) resume request. */
4521 if (in_queued_stop_replies (thread->id))
4522 {
4523 if (debug_threads)
4524 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4525 lwpid_of (thread));
4526 continue;
4527 }
4528
4529 lwp->resume = &resume[ndx];
4530 thread->last_resume_kind = lwp->resume->kind;
4531
4532 lwp->step_range_start = lwp->resume->step_range_start;
4533 lwp->step_range_end = lwp->resume->step_range_end;
4534
4535 /* If we had a deferred signal to report, dequeue one now.
4536 This can happen if the LWP gets more than one signal while
4537 trying to get out of a jump pad. */
4538 if (lwp->stopped
4539 && !lwp->status_pending_p
4540 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4541 {
4542 lwp->status_pending_p = 1;
4543
4544 if (debug_threads)
4545 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4546 "leaving status pending.\n",
4547 WSTOPSIG (lwp->status_pending),
4548 lwpid_of (thread));
4549 }
4550
4551 return;
4552 }
4553 }
4554
4555 /* No resume action for this thread. */
4556 lwp->resume = NULL;
4557 }
4558
4559 /* find_thread callback for linux_resume. Return true if this lwp has an
4560 interesting status pending. */
4561
4562 static bool
4563 resume_status_pending_p (thread_info *thread)
4564 {
4565 struct lwp_info *lwp = get_thread_lwp (thread);
4566
4567 /* LWPs which will not be resumed are not interesting, because
4568 we might not wait for them next time through linux_wait. */
4569 if (lwp->resume == NULL)
4570 return false;
4571
4572 return thread_still_has_status_pending_p (thread);
4573 }
4574
4575 /* Return true if this lwp that GDB wants running is stopped at an
4576 internal breakpoint that we need to step over. It assumes that any
4577 required STOP_PC adjustment has already been propagated to the
4578 inferior's regcache. */
4579
4580 static bool
4581 need_step_over_p (thread_info *thread)
4582 {
4583 struct lwp_info *lwp = get_thread_lwp (thread);
4584 struct thread_info *saved_thread;
4585 CORE_ADDR pc;
4586 struct process_info *proc = get_thread_process (thread);
4587
4588 /* GDBserver is skipping the extra traps from the wrapper program;
4589 no step-over is needed. */
4590 if (proc->tdesc == NULL)
4591 return false;
4592
4593 /* LWPs which will not be resumed are not interesting, because we
4594 might not wait for them next time through linux_wait. */
4595
4596 if (!lwp->stopped)
4597 {
4598 if (debug_threads)
4599 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4600 lwpid_of (thread));
4601 return false;
4602 }
4603
4604 if (thread->last_resume_kind == resume_stop)
4605 {
4606 if (debug_threads)
4607 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4608 " stopped\n",
4609 lwpid_of (thread));
4610 return false;
4611 }
4612
4613 gdb_assert (lwp->suspended >= 0);
4614
4615 if (lwp->suspended)
4616 {
4617 if (debug_threads)
4618 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4619 lwpid_of (thread));
4620 return false;
4621 }
4622
4623 if (lwp->status_pending_p)
4624 {
4625 if (debug_threads)
4626 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4627 " status.\n",
4628 lwpid_of (thread));
4629 return false;
4630 }
4631
4632 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4633 or we have. */
4634 pc = get_pc (lwp);
4635
4636 /* If the PC has changed since we stopped, then don't do anything,
4637 and let the breakpoint/tracepoint be hit. This happens if, for
4638 instance, GDB handled the decr_pc_after_break subtraction itself,
4639 GDB is OOL stepping this thread, or the user has issued a "jump"
4640 command, or poked the thread's registers herself. */
4641 if (pc != lwp->stop_pc)
4642 {
4643 if (debug_threads)
4644 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4645 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4646 lwpid_of (thread),
4647 paddress (lwp->stop_pc), paddress (pc));
4648 return false;
4649 }
4650
4651 /* On software single-step targets, resume the inferior with a
4652 signal rather than stepping over. */
4653 if (can_software_single_step ()
4654 && lwp->pending_signals != NULL
4655 && lwp_signal_can_be_delivered (lwp))
4656 {
4657 if (debug_threads)
4658 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4659 " signals.\n",
4660 lwpid_of (thread));
4661
4662 return false;
4663 }
4664
4665 saved_thread = current_thread;
4666 current_thread = thread;
4667
4668 /* We can only step over breakpoints we know about. */
4669 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4670 {
4671 /* Don't step over a breakpoint that GDB expects to hit
4672 though. If the condition is being evaluated on the target's side
4673 and it evaluates to false, step over this breakpoint as well. */
4674 if (gdb_breakpoint_here (pc)
4675 && gdb_condition_true_at_breakpoint (pc)
4676 && gdb_no_commands_at_breakpoint (pc))
4677 {
4678 if (debug_threads)
4679 debug_printf ("Need step over [LWP %ld]? yes, but found"
4680 " GDB breakpoint at 0x%s; skipping step over\n",
4681 lwpid_of (thread), paddress (pc));
4682
4683 current_thread = saved_thread;
4684 return false;
4685 }
4686 else
4687 {
4688 if (debug_threads)
4689 debug_printf ("Need step over [LWP %ld]? yes, "
4690 "found breakpoint at 0x%s\n",
4691 lwpid_of (thread), paddress (pc));
4692
4693 /* We've found an lwp that needs stepping over --- return true
4694 so that find_thread stops looking. */
4695 current_thread = saved_thread;
4696
4697 return true;
4698 }
4699 }
4700
4701 current_thread = saved_thread;
4702
4703 if (debug_threads)
4704 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4705 " at 0x%s\n",
4706 lwpid_of (thread), paddress (pc));
4707
4708 return false;
4709 }
4710
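/* Start a step-over operation on LWP. All other threads are
   stopped, the breakpoint being stepped over is removed from the
   inferior, and LWP is stepped past it, either with a hardware
   single-step or with single-step breakpoints. finish_step_over
   reinserts the breakpoint once the step completes. */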
4711 void
4712 linux_process_target::start_step_over (lwp_info *lwp)
4713 {
4714 struct thread_info *thread = get_lwp_thread (lwp);
4715 struct thread_info *saved_thread;
4716 CORE_ADDR pc;
4717 int step;
4718
4719 if (debug_threads)
4720 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4721 lwpid_of (thread));
4722
4723 stop_all_lwps (1, lwp);
4724
4725 if (lwp->suspended != 0)
4726 {
4727 internal_error (__FILE__, __LINE__,
4728 "LWP %ld suspended=%d\n", lwpid_of (thread),
4729 lwp->suspended);
4730 }
4731
4732 if (debug_threads)
4733 debug_printf ("Done stopping all threads for step-over.\n");
4734
4735 /* Note, we should always reach here with an already adjusted PC,
4736 either by GDB (if we're resuming due to GDB's request), or by our
4737 caller, if we just finished handling an internal breakpoint GDB
4738 shouldn't care about. */
4739 pc = get_pc (lwp);
4740
4741 saved_thread = current_thread;
4742 current_thread = thread;
4743
4744 lwp->bp_reinsert = pc;
4745 uninsert_breakpoints_at (pc);
4746 uninsert_fast_tracepoint_jumps_at (pc);
4747
4748 step = single_step (lwp);
4749
4750 current_thread = saved_thread;
4751
4752 linux_resume_one_lwp (lwp, step, 0, NULL);
4753
4754 /* Require next event from this LWP. */
4755 step_over_bkpt = thread->id;
4756 }
4757
4758 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4759 start_step_over, if still there, and delete any single-step
4760 breakpoints we've set, on non-hardware single-step targets. */
4761
4762 static int
4763 finish_step_over (struct lwp_info *lwp)
4764 {
4765 if (lwp->bp_reinsert != 0)
4766 {
4767 struct thread_info *saved_thread = current_thread;
4768
4769 if (debug_threads)
4770 debug_printf ("Finished step over.\n");
4771
4772 current_thread = get_lwp_thread (lwp);
4773
4774 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4775 may be no breakpoint to reinsert there by now. */
4776 reinsert_breakpoints_at (lwp->bp_reinsert);
4777 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4778
4779 lwp->bp_reinsert = 0;
4780
4781 /* Delete any single-step breakpoints. No longer needed. We
4782 don't have to worry about other threads hitting this trap,
4783 and later not being able to explain it, because we were
4784 stepping over a breakpoint, and we hold all threads but
4785 LWP stopped while doing that. */
4786 if (!can_hardware_single_step ())
4787 {
4788 gdb_assert (has_single_step_breakpoints (current_thread));
4789 delete_single_step_breakpoints (current_thread);
4790 }
4791
4792 step_over_bkpt = null_ptid;
4793 current_thread = saved_thread;
4794 return 1;
4795 }
4796 else
4797 return 0;
4798 }
4799
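/* If a step-over is in progress, pull events until it finishes,
   then clean up after it and unsuspend the other LWPs. */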
4800 void
4801 linux_process_target::complete_ongoing_step_over ()
4802 {
4803 if (step_over_bkpt != null_ptid)
4804 {
4805 struct lwp_info *lwp;
4806 int wstat;
4807 int ret;
4808
4809 if (debug_threads)
4810 debug_printf ("detach: step over in progress, finish it first\n");
4811
4812 /* Passing NULL_PTID as filter indicates we want all events to
4813 be left pending. Eventually this returns when there are no
4814 unwaited-for children left. */
4815 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4816 __WALL);
4817 gdb_assert (ret == -1);
4818
4819 lwp = find_lwp_pid (step_over_bkpt);
4820 if (lwp != NULL)
4821 finish_step_over (lwp);
4822 step_over_bkpt = null_ptid;
4823 unsuspend_all_lwps (lwp);
4824 }
4825 }
4826
4827 /* This function is called once per thread. We check the thread's resume
4828 request, which will tell us whether to resume, step, or leave the thread
4829 stopped; and what signal, if any, it should be sent.
4830
4831 For threads which we aren't explicitly told otherwise, we preserve
4832 the stepping flag; this is used for stepping over gdbserver-placed
4833 breakpoints.
4834
4835 If pending_flags was set in any thread, we queue any needed
4836 signals, since we won't actually resume. We already have a pending
4837 event to report, so we don't need to preserve any step requests;
4838 they should be re-issued if necessary. */
4839
4840 static void
4841 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4842 {
4843 struct lwp_info *lwp = get_thread_lwp (thread);
4844 int leave_pending;
4845
4846 if (lwp->resume == NULL)
4847 return;
4848
4849 if (lwp->resume->kind == resume_stop)
4850 {
4851 if (debug_threads)
4852 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4853
4854 if (!lwp->stopped)
4855 {
4856 if (debug_threads)
4857 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4858
4859 /* Stop the thread, and wait for the event asynchronously,
4860 through the event loop. */
4861 send_sigstop (lwp);
4862 }
4863 else
4864 {
4865 if (debug_threads)
4866 debug_printf ("already stopped LWP %ld\n",
4867 lwpid_of (thread));
4868
4869 /* The LWP may have been stopped in an internal event that
4870 was not meant to be notified back to GDB (e.g., gdbserver
4871 breakpoint), so we should be reporting a stop event in
4872 this case too. */
4873
4874 /* If the thread already has a pending SIGSTOP, this is a
4875 no-op. Otherwise, something later will presumably resume
4876 the thread and this will cause it to cancel any pending
4877 operation, due to last_resume_kind == resume_stop. If
4878 the thread already has a pending status to report, we
4879 will still report it the next time we wait - see
4880 status_pending_p_callback. */
4881
4882 /* If we already have a pending signal to report, then
4883 there's no need to queue a SIGSTOP, as this means we're
4884 midway through moving the LWP out of the jumppad, and we
4885 will report the pending signal as soon as that is
4886 finished. */
4887 if (lwp->pending_signals_to_report == NULL)
4888 send_sigstop (lwp);
4889 }
4890
4891 /* For stop requests, we're done. */
4892 lwp->resume = NULL;
4893 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4894 return;
4895 }
4896
4897 /* If this thread which is about to be resumed has a pending status,
4898 then don't resume it - we can just report the pending status.
4899 Likewise if it is suspended, because e.g., another thread is
4900 stepping past a breakpoint. Make sure to queue any signals that
4901 would otherwise be sent. In all-stop mode, we make this decision
4902 based on whether *any* thread has a pending status. If there's a
4903 thread that needs the step-over-breakpoint dance, then don't
4904 resume any other thread but that particular one. */
4905 leave_pending = (lwp->suspended
4906 || lwp->status_pending_p
4907 || leave_all_stopped);
4908
4909 /* If we have a new signal, enqueue the signal. */
4910 if (lwp->resume->sig != 0)
4911 {
4912 siginfo_t info, *info_p;
4913
4914 /* If this is the same signal we were previously stopped by,
4915 make sure to queue its siginfo. */
4916 if (WIFSTOPPED (lwp->last_status)
4917 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4918 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4919 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4920 info_p = &info;
4921 else
4922 info_p = NULL;
4923
4924 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4925 }
4926
4927 if (!leave_pending)
4928 {
4929 if (debug_threads)
4930 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4931
4932 proceed_one_lwp (thread, NULL);
4933 }
4934 else
4935 {
4936 if (debug_threads)
4937 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4938 }
4939
4940 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4941 lwp->resume = NULL;
4942 }
4943
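/* Implementation of the resume target op. Record each thread's
   resume request, then either start a pending step-over or
   resume/leave-stopped the threads accordingly. */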
4944 void
4945 linux_process_target::resume (thread_resume *resume_info, size_t n)
4946 {
4947 struct thread_info *need_step_over = NULL;
4948
4949 if (debug_threads)
4950 {
4951 debug_enter ();
4952 debug_printf ("linux_resume:\n");
4953 }
4954
4955 for_each_thread ([&] (thread_info *thread)
4956 {
4957 linux_set_resume_request (thread, resume_info, n);
4958 });
4959
4960 /* If there is a thread which would otherwise be resumed, which has
4961 a pending status, then don't resume any threads - we can just
4962 report the pending status. Make sure to queue any signals that
4963 would otherwise be sent. In non-stop mode, we'll apply this
4964 logic to each thread individually. We consume all pending events
4965 before considering starting a step-over (in all-stop). */
4966 bool any_pending = false;
4967 if (!non_stop)
4968 any_pending = find_thread (resume_status_pending_p) != NULL;
4969
4970 /* If there is a thread which would otherwise be resumed, which is
4971 stopped at a breakpoint that needs stepping over, then don't
4972 resume any threads - have it step over the breakpoint with all
4973 other threads stopped, then resume all threads again. Make sure
4974 to queue any signals that would otherwise be delivered or
4975 queued. */
4976 if (!any_pending && supports_breakpoints ())
4977 need_step_over = find_thread (need_step_over_p);
4978
4979 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4980
4981 if (debug_threads)
4982 {
4983 if (need_step_over != NULL)
4984 debug_printf ("Not resuming all, need step over\n");
4985 else if (any_pending)
4986 debug_printf ("Not resuming, all-stop and found "
4987 "an LWP with pending status\n");
4988 else
4989 debug_printf ("Resuming, no pending status or step over needed\n");
4990 }
4991
4992 /* Even if we're leaving threads stopped, queue all signals we'd
4993 otherwise deliver. */
4994 for_each_thread ([&] (thread_info *thread)
4995 {
4996 linux_resume_one_thread (thread, leave_all_stopped);
4997 });
4998
4999 if (need_step_over)
5000 start_step_over (get_thread_lwp (need_step_over));
5001
5002 if (debug_threads)
5003 {
5004 debug_printf ("linux_resume done\n");
5005 debug_exit ();
5006 }
5007
5008 /* We may have events that were pending that can/should be sent to
5009 the client now. Trigger a linux_wait call. */
5010 if (target_is_async_p ())
5011 async_file_mark ();
5012 }
5013
5014 /* This function is called once per thread. We check the thread's
5015 last resume request, which will tell us whether to resume, step, or
5016 leave the thread stopped. Any signal the client requested to be
5017 delivered has already been enqueued at this point.
5018
5019 If any thread that GDB wants running is stopped at an internal
5020 breakpoint that needs stepping over, we start a step-over operation
5021 on that particular thread, and leave all others stopped. */
5022
5023 static void
5024 proceed_one_lwp (thread_info *thread, lwp_info *except)
5025 {
5026 struct lwp_info *lwp = get_thread_lwp (thread);
5027 int step;
5028
5029 if (lwp == except)
5030 return;
5031
5032 if (debug_threads)
5033 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5034
5035 if (!lwp->stopped)
5036 {
5037 if (debug_threads)
5038 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5039 return;
5040 }
5041
5042 if (thread->last_resume_kind == resume_stop
5043 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5044 {
5045 if (debug_threads)
5046 debug_printf (" client wants LWP %ld to remain stopped\n",
5047 lwpid_of (thread));
5048 return;
5049 }
5050
5051 if (lwp->status_pending_p)
5052 {
5053 if (debug_threads)
5054 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5055 lwpid_of (thread));
5056 return;
5057 }
5058
5059 gdb_assert (lwp->suspended >= 0);
5060
5061 if (lwp->suspended)
5062 {
5063 if (debug_threads)
5064 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5065 return;
5066 }
5067
5068 if (thread->last_resume_kind == resume_stop
5069 && lwp->pending_signals_to_report == NULL
5070 && (lwp->collecting_fast_tracepoint
5071 == fast_tpoint_collect_result::not_collecting))
5072 {
5073 /* We haven't reported this LWP as stopped yet (otherwise, the
5074 last_status.kind check above would catch it, and we wouldn't
5075 reach here). This LWP may have been momentarily paused by a
5076 stop_all_lwps call while handling for example, another LWP's
5077 step-over. In that case, the pending expected SIGSTOP signal
5078 that was queued at vCont;t handling time will have already
5079 been consumed by wait_for_sigstop, and so we need to requeue
5080 another one here. Note that if the LWP already has a SIGSTOP
5081 pending, this is a no-op. */
5082
5083 if (debug_threads)
5084 debug_printf ("Client wants LWP %ld to stop. "
5085 "Making sure it has a SIGSTOP pending\n",
5086 lwpid_of (thread));
5087
5088 send_sigstop (lwp);
5089 }
5090
5091 if (thread->last_resume_kind == resume_step)
5092 {
5093 if (debug_threads)
5094 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5095 lwpid_of (thread));
5096
5097 /* If resume_step is requested by GDB, install single-step
5098 breakpoints when the thread is about to be actually resumed if
5099 the single-step breakpoints weren't removed. */
5100 if (can_software_single_step ()
5101 && !has_single_step_breakpoints (thread))
5102 install_software_single_step_breakpoints (lwp);
5103
5104 step = maybe_hw_step (thread);
5105 }
5106 else if (lwp->bp_reinsert != 0)
5107 {
5108 if (debug_threads)
5109 debug_printf (" stepping LWP %ld, reinsert set\n",
5110 lwpid_of (thread));
5111
5112 step = maybe_hw_step (thread);
5113 }
5114 else
5115 step = 0;
5116
5117 linux_resume_one_lwp (lwp, step, 0, NULL);
5118 }
5119
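/* for_each_thread callback for unstop_all_lwps. Decrement the
   suspend count of THREAD's LWP and proceed it, unless it is
   EXCEPT. */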
5120 static void
5121 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5122 {
5123 struct lwp_info *lwp = get_thread_lwp (thread);
5124
5125 if (lwp == except)
5126 return;
5127
5128 lwp_suspended_decr (lwp);
5129
5130 proceed_one_lwp (thread, except);
5131 }
5132
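/* Proceed (resume) all LWPs according to their last resume request,
   unless one of them needs a step-over, in which case start that
   step-over and leave everything else stopped. */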
5133 void
5134 linux_process_target::proceed_all_lwps ()
5135 {
5136 struct thread_info *need_step_over;
5137
5138 /* If there is a thread which would otherwise be resumed, which is
5139 stopped at a breakpoint that needs stepping over, then don't
5140 resume any threads - have it step over the breakpoint with all
5141 other threads stopped, then resume all threads again. */
5142
5143 if (supports_breakpoints ())
5144 {
5145 need_step_over = find_thread (need_step_over_p);
5146
5147 if (need_step_over != NULL)
5148 {
5149 if (debug_threads)
5150 debug_printf ("proceed_all_lwps: found "
5151 "thread %ld needing a step-over\n",
5152 lwpid_of (need_step_over));
5153
5154 start_step_over (get_thread_lwp (need_step_over));
5155 return;
5156 }
5157 }
5158
5159 if (debug_threads)
5160 debug_printf ("Proceeding, no step-over needed\n");
5161
5162 for_each_thread ([] (thread_info *thread)
5163 {
5164 proceed_one_lwp (thread, NULL);
5165 });
5166 }
5167
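/* Undo a previous stop_all_lwps call: proceed all LWPs except
   EXCEPT, first decrementing their suspend counts if UNSUSPEND is
   set. */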
5168 void
5169 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5170 {
5171 if (debug_threads)
5172 {
5173 debug_enter ();
5174 if (except)
5175 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5176 lwpid_of (get_lwp_thread (except)));
5177 else
5178 debug_printf ("unstopping all lwps\n");
5179 }
5180
5181 if (unsuspend)
5182 for_each_thread ([&] (thread_info *thread)
5183 {
5184 unsuspend_and_proceed_one_lwp (thread, except);
5185 });
5186 else
5187 for_each_thread ([&] (thread_info *thread)
5188 {
5189 proceed_one_lwp (thread, except);
5190 });
5191
5192 if (debug_threads)
5193 {
5194 debug_printf ("unstop_all_lwps done\n");
5195 debug_exit ();
5196 }
5197 }
5198
5199
5200 #ifdef HAVE_LINUX_REGSETS
5201
5202 #define use_linux_regsets 1
5203
5204 /* Returns true if REGSET has been disabled. */
5205
5206 static int
5207 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5208 {
5209 return (info->disabled_regsets != NULL
5210 && info->disabled_regsets[regset - info->regsets]);
5211 }
5212
5213 /* Disable REGSET. */
5214
5215 static void
5216 disable_regset (struct regsets_info *info, struct regset_info *regset)
5217 {
5218 int dr_offset;
5219
5220 dr_offset = regset - info->regsets;
5221 if (info->disabled_regsets == NULL)
5222 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5223 info->disabled_regsets[dr_offset] = 1;
5224 }
5225
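/* Fetch the registers covered by the regsets in REGSETS_INFO from
   the inferior into REGCACHE. Return 0 if a general-register
   regset was among them, 1 otherwise, so the caller knows whether
   the general registers still need to be fetched another way. */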
5226 static int
5227 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5228 struct regcache *regcache)
5229 {
5230 struct regset_info *regset;
5231 int saw_general_regs = 0;
5232 int pid;
5233 struct iovec iov;
5234
5235 pid = lwpid_of (current_thread);
5236 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5237 {
5238 void *buf, *data;
5239 int nt_type, res;
5240
5241 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5242 continue;
5243
5244 buf = xmalloc (regset->size);
5245
5246 nt_type = regset->nt_type;
5247 if (nt_type)
5248 {
5249 iov.iov_base = buf;
5250 iov.iov_len = regset->size;
5251 data = (void *) &iov;
5252 }
5253 else
5254 data = buf;
5255
5256 #ifndef __sparc__
5257 res = ptrace (regset->get_request, pid,
5258 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5259 #else
5260 res = ptrace (regset->get_request, pid, data, nt_type);
5261 #endif
5262 if (res < 0)
5263 {
5264 if (errno == EIO
5265 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5266 {
5267 /* If we get EIO on a regset, or an EINVAL and the regset is
5268 optional, do not try it again for this process mode. */
5269 disable_regset (regsets_info, regset);
5270 }
5271 else if (errno == ENODATA)
5272 {
5273 /* ENODATA may be returned if the regset is currently
5274 not "active". This can happen in normal operation,
5275 so suppress the warning in this case. */
5276 }
5277 else if (errno == ESRCH)
5278 {
5279 /* At this point, ESRCH should mean the process is
5280 already gone, in which case we simply ignore attempts
5281 to read its registers. */
5282 }
5283 else
5284 {
5285 char s[256];
5286 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5287 pid);
5288 perror (s);
5289 }
5290 }
5291 else
5292 {
5293 if (regset->type == GENERAL_REGS)
5294 saw_general_regs = 1;
5295 regset->store_function (regcache, buf);
5296 }
5297 free (buf);
5298 }
5299 if (saw_general_regs)
5300 return 0;
5301 else
5302 return 1;
5303 }
5304
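/* Write the registers covered by the regsets in REGSETS_INFO from
   REGCACHE back to the inferior. Return 0 if a general-register
   regset was written, 1 otherwise. */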
5305 static int
5306 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5307 struct regcache *regcache)
5308 {
5309 struct regset_info *regset;
5310 int saw_general_regs = 0;
5311 int pid;
5312 struct iovec iov;
5313
5314 pid = lwpid_of (current_thread);
5315 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5316 {
5317 void *buf, *data;
5318 int nt_type, res;
5319
5320 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5321 || regset->fill_function == NULL)
5322 continue;
5323
5324 buf = xmalloc (regset->size);
5325
5326 /* First fill the buffer with the current register set contents,
5327 in case there are any items in the kernel's regset that are
5328 not in gdbserver's regcache. */
5329
5330 nt_type = regset->nt_type;
5331 if (nt_type)
5332 {
5333 iov.iov_base = buf;
5334 iov.iov_len = regset->size;
5335 data = (void *) &iov;
5336 }
5337 else
5338 data = buf;
5339
5340 #ifndef __sparc__
5341 res = ptrace (regset->get_request, pid,
5342 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5343 #else
5344 res = ptrace (regset->get_request, pid, data, nt_type);
5345 #endif
5346
5347 if (res == 0)
5348 {
5349 /* Then overlay our cached registers on that. */
5350 regset->fill_function (regcache, buf);
5351
5352 /* Only now do we write the register set. */
5353 #ifndef __sparc__
5354 res = ptrace (regset->set_request, pid,
5355 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5356 #else
5357 res = ptrace (regset->set_request, pid, data, nt_type);
5358 #endif
5359 }
5360
5361 if (res < 0)
5362 {
5363 if (errno == EIO
5364 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5365 {
5366 /* If we get EIO on a regset, or an EINVAL and the regset is
5367 optional, do not try it again for this process mode. */
5368 disable_regset (regsets_info, regset);
5369 }
5370 else if (errno == ESRCH)
5371 {
5372 /* At this point, ESRCH should mean the process is
5373 already gone, in which case we simply ignore attempts
5374 to change its registers. See also the related
5375 comment in linux_resume_one_lwp. */
5376 free (buf);
5377 return 0;
5378 }
5379 else
5380 {
5381 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5382 }
5383 }
5384 else if (regset->type == GENERAL_REGS)
5385 saw_general_regs = 1;
5386 free (buf);
5387 }
5388 if (saw_general_regs)
5389 return 0;
5390 else
5391 return 1;
5392 }
5393
5394 #else /* !HAVE_LINUX_REGSETS */
5395
5396 #define use_linux_regsets 0
5397 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5398 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5399
5400 #endif
5401
5402 /* Return 1 if register REGNO is supported by one of the regset ptrace
5403 calls or 0 if it has to be transferred individually. */
5404
5405 static int
5406 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5407 {
5408 unsigned char mask = 1 << (regno % 8);
5409 size_t index = regno / 8;
5410
5411 return (use_linux_regsets
5412 && (regs_info->regset_bitmap == NULL
5413 || (regs_info->regset_bitmap[index] & mask) != 0));
5414 }
5415
5416 #ifdef HAVE_LINUX_USRREGS
5417
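/* Return the ptrace USER-area offset of register REGNUM according
   to USRREGS, erroring out if REGNUM is out of range. A mapping of
   -1 means the register cannot be accessed this way; callers check
   for that value. */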
5418 static int
5419 register_addr (const struct usrregs_info *usrregs, int regnum)
5420 {
5421 int addr;
5422
5423 if (regnum < 0 || regnum >= usrregs->num_regs)
5424 error ("Invalid register number %d.", regnum);
5425
5426 addr = usrregs->regmap[regnum];
5427
5428 return addr;
5429 }
5430
5431 /* Fetch one register. */
5432 static void
5433 fetch_register (const struct usrregs_info *usrregs,
5434 struct regcache *regcache, int regno)
5435 {
5436 CORE_ADDR regaddr;
5437 int i, size;
5438 char *buf;
5439 int pid;
5440
5441 if (regno >= usrregs->num_regs)
5442 return;
5443 if ((*the_low_target.cannot_fetch_register) (regno))
5444 return;
5445
5446 regaddr = register_addr (usrregs, regno);
5447 if (regaddr == -1)
5448 return;
5449
5450 size = ((register_size (regcache->tdesc, regno)
5451 + sizeof (PTRACE_XFER_TYPE) - 1)
5452 & -sizeof (PTRACE_XFER_TYPE));
5453 buf = (char *) alloca (size);
5454
5455 pid = lwpid_of (current_thread);
5456 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5457 {
5458 errno = 0;
5459 *(PTRACE_XFER_TYPE *) (buf + i) =
5460 ptrace (PTRACE_PEEKUSER, pid,
5461 /* Coerce to a uintptr_t first to avoid potential gcc warning
5462 of coercing an 8 byte integer to a 4 byte pointer. */
5463 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5464 regaddr += sizeof (PTRACE_XFER_TYPE);
5465 if (errno != 0)
5466 {
5467 /* Mark register REGNO unavailable. */
5468 supply_register (regcache, regno, NULL);
5469 return;
5470 }
5471 }
5472
5473 if (the_low_target.supply_ptrace_register)
5474 the_low_target.supply_ptrace_register (regcache, regno, buf);
5475 else
5476 supply_register (regcache, regno, buf);
5477 }
5478
5479 /* Store one register. */
5480 static void
5481 store_register (const struct usrregs_info *usrregs,
5482 struct regcache *regcache, int regno)
5483 {
5484 CORE_ADDR regaddr;
5485 int i, size;
5486 char *buf;
5487 int pid;
5488
5489 if (regno >= usrregs->num_regs)
5490 return;
5491 if ((*the_low_target.cannot_store_register) (regno))
5492 return;
5493
5494 regaddr = register_addr (usrregs, regno);
5495 if (regaddr == -1)
5496 return;
5497
5498 size = ((register_size (regcache->tdesc, regno)
5499 + sizeof (PTRACE_XFER_TYPE) - 1)
5500 & -sizeof (PTRACE_XFER_TYPE));
5501 buf = (char *) alloca (size);
5502 memset (buf, 0, size);
5503
5504 if (the_low_target.collect_ptrace_register)
5505 the_low_target.collect_ptrace_register (regcache, regno, buf);
5506 else
5507 collect_register (regcache, regno, buf);
5508
5509 pid = lwpid_of (current_thread);
5510 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5511 {
5512 errno = 0;
5513 ptrace (PTRACE_POKEUSER, pid,
5514 /* Coerce to a uintptr_t first to avoid potential gcc warning
5515 about coercing an 8 byte integer to a 4 byte pointer. */
5516 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5517 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5518 if (errno != 0)
5519 {
5520 /* At this point, ESRCH should mean the process is
5521 already gone, in which case we simply ignore attempts
5522 to change its registers. See also the related
5523 comment in linux_resume_one_lwp. */
5524 if (errno == ESRCH)
5525 return;
5526
5527 if ((*the_low_target.cannot_store_register) (regno) == 0)
5528 error ("writing register %d: %s", regno, safe_strerror (errno));
5529 }
5530 regaddr += sizeof (PTRACE_XFER_TYPE);
5531 }
5532 }
5533
5534 /* Fetch all registers, or just one, from the child process.
5535 If REGNO is -1, do this for all registers, skipping any that are
5536 assumed to have been retrieved by regsets_fetch_inferior_registers,
5537 unless ALL is non-zero.
5538 Otherwise, REGNO specifies which register (so we can save time). */
5539 static void
5540 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5541 struct regcache *regcache, int regno, int all)
5542 {
5543 struct usrregs_info *usr = regs_info->usrregs;
5544
5545 if (regno == -1)
5546 {
5547 for (regno = 0; regno < usr->num_regs; regno++)
5548 if (all || !linux_register_in_regsets (regs_info, regno))
5549 fetch_register (usr, regcache, regno);
5550 }
5551 else
5552 fetch_register (usr, regcache, regno);
5553 }
5554
5555 /* Store our register values back into the inferior.
5556 If REGNO is -1, do this for all registers, skipping any that are
5557 assumed to have been saved by regsets_store_inferior_registers,
5558 unless ALL is non-zero.
5559 Otherwise, REGNO specifies which register (so we can save time). */
5560 static void
5561 usr_store_inferior_registers (const struct regs_info *regs_info,
5562 struct regcache *regcache, int regno, int all)
5563 {
5564 struct usrregs_info *usr = regs_info->usrregs;
5565
5566 if (regno == -1)
5567 {
5568 for (regno = 0; regno < usr->num_regs; regno++)
5569 if (all || !linux_register_in_regsets (regs_info, regno))
5570 store_register (usr, regcache, regno);
5571 }
5572 else
5573 store_register (usr, regcache, regno);
5574 }
5575
5576 #else /* !HAVE_LINUX_USRREGS */
5577
5578 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5579 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5580
5581 #endif
5582
5583
5584 void
5585 linux_process_target::fetch_registers (regcache *regcache, int regno)
5586 {
5587 int use_regsets;
5588 int all = 0;
5589 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5590
5591 if (regno == -1)
5592 {
5593 if (the_low_target.fetch_register != NULL
5594 && regs_info->usrregs != NULL)
5595 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5596 (*the_low_target.fetch_register) (regcache, regno);
5597
5598 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5599 if (regs_info->usrregs != NULL)
5600 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5601 }
5602 else
5603 {
5604 if (the_low_target.fetch_register != NULL
5605 && (*the_low_target.fetch_register) (regcache, regno))
5606 return;
5607
5608 use_regsets = linux_register_in_regsets (regs_info, regno);
5609 if (use_regsets)
5610 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5611 regcache);
5612 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5613 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5614 }
5615 }
5616
5617 void
5618 linux_process_target::store_registers (regcache *regcache, int regno)
5619 {
5620 int use_regsets;
5621 int all = 0;
5622 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5623
5624 if (regno == -1)
5625 {
5626 all = regsets_store_inferior_registers (regs_info->regsets_info,
5627 regcache);
5628 if (regs_info->usrregs != NULL)
5629 usr_store_inferior_registers (regs_info, regcache, regno, all);
5630 }
5631 else
5632 {
5633 use_regsets = linux_register_in_regsets (regs_info, regno);
5634 if (use_regsets)
5635 all = regsets_store_inferior_registers (regs_info->regsets_info,
5636 regcache);
5637 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5638 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5639 }
5640 }
5641
5642
5643 /* A wrapper for the read_memory target op. */
5644
5645 static int
5646 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5647 {
5648 return the_target->read_memory (memaddr, myaddr, len);
5649 }
5650
5651 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5652 to debugger memory starting at MYADDR. */
5653
5654 int
5655 linux_process_target::read_memory (CORE_ADDR memaddr,
5656 unsigned char *myaddr, int len)
5657 {
5658 int pid = lwpid_of (current_thread);
5659 PTRACE_XFER_TYPE *buffer;
5660 CORE_ADDR addr;
5661 int count;
5662 char filename[64];
5663 int i;
5664 int ret;
5665 int fd;
5666
5667 /* Try using /proc.  Don't bother for transfers of less than three words.  */
5668 if (len >= 3 * sizeof (long))
5669 {
5670 int bytes;
5671
5672 /* We could keep this file open and cache it - possibly one per
5673 thread. That requires some juggling, but is even faster. */
5674 sprintf (filename, "/proc/%d/mem", pid);
5675 fd = open (filename, O_RDONLY | O_LARGEFILE);
5676 if (fd == -1)
5677 goto no_proc;
5678
5679 /* If pread64 is available, use it. It's faster if the kernel
5680 supports it (only one syscall), and it's 64-bit safe even on
5681 32-bit platforms (for instance, SPARC debugging a SPARC64
5682 application). */
5683 #ifdef HAVE_PREAD64
5684 bytes = pread64 (fd, myaddr, len, memaddr);
5685 #else
5686 bytes = -1;
5687 if (lseek (fd, memaddr, SEEK_SET) != -1)
5688 bytes = read (fd, myaddr, len);
5689 #endif
5690
5691 close (fd);
5692 if (bytes == len)
5693 return 0;
5694
5695 /* Some data was read; we'll try to get the rest with ptrace.  */
5696 if (bytes > 0)
5697 {
5698 memaddr += bytes;
5699 myaddr += bytes;
5700 len -= bytes;
5701 }
5702 }
5703
5704 no_proc:
5705 /* Round starting address down to longword boundary. */
5706 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5707 /* Round ending address up; get number of longwords that makes. */
5708 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5709 / sizeof (PTRACE_XFER_TYPE));
5710 /* Allocate buffer of that many longwords. */
5711 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5712
5713 /* Read all the longwords.  */
5714 errno = 0;
5715 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5716 {
5717 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5718 about coercing an 8 byte integer to a 4 byte pointer. */
5719 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5720 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5721 (PTRACE_TYPE_ARG4) 0);
5722 if (errno)
5723 break;
5724 }
5725 ret = errno;
5726
5727 /* Copy appropriate bytes out of the buffer. */
5728 if (i > 0)
5729 {
5730 i *= sizeof (PTRACE_XFER_TYPE);
5731 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5732 memcpy (myaddr,
5733 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5734 i < len ? i : len);
5735 }
5736
5737 return ret;
5738 }
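
/* For illustration only: a minimal, hypothetical sketch of the
   /proc/<pid>/mem fast path used above, including the lseek/read
   fallback for hosts without pread64.  Unlike the real code, it simply
   returns -1 on failure instead of falling back to ptrace; the headers
   it needs (<fcntl.h>, <unistd.h>) are already included at the top of
   this file.  */

static ssize_t
proc_mem_read_sketch (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  ssize_t bytes;
  int fd;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

#ifdef HAVE_PREAD64
  /* One syscall, and 64-bit safe even on 32-bit platforms.  */
  bytes = pread64 (fd, buf, len, addr);
#else
  bytes = -1;
  if (lseek (fd, addr, SEEK_SET) != -1)
    bytes = read (fd, buf, len);
#endif

  close (fd);
  return bytes;
}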
5739
5740 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5741 memory at MEMADDR. On failure (cannot write to the inferior)
5742 returns the value of errno. Always succeeds if LEN is zero. */
5743
5744 int
5745 linux_process_target::write_memory (CORE_ADDR memaddr,
5746 const unsigned char *myaddr, int len)
5747 {
5748 int i;
5749 /* Round starting address down to longword boundary. */
5750 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5751 /* Round ending address up; get number of longwords that makes. */
5752 int count
5753 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5754 / sizeof (PTRACE_XFER_TYPE);
5755
5756 /* Allocate buffer of that many longwords. */
5757 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5758
5759 int pid = lwpid_of (current_thread);
5760
5761 if (len == 0)
5762 {
5763 /* Zero length write always succeeds. */
5764 return 0;
5765 }
5766
5767 if (debug_threads)
5768 {
5769 /* Dump up to four bytes. */
5770 char str[4 * 2 + 1];
5771 char *p = str;
5772 int dump = len < 4 ? len : 4;
5773
5774 for (i = 0; i < dump; i++)
5775 {
5776 sprintf (p, "%02x", myaddr[i]);
5777 p += 2;
5778 }
5779 *p = '\0';
5780
5781 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5782 str, (long) memaddr, pid);
5783 }
5784
5785 /* Fill start and end extra bytes of buffer with existing memory data. */
5786
5787 errno = 0;
5788 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5789 about coercing an 8 byte integer to a 4 byte pointer. */
5790 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5791 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5792 (PTRACE_TYPE_ARG4) 0);
5793 if (errno)
5794 return errno;
5795
5796 if (count > 1)
5797 {
5798 errno = 0;
5799 buffer[count - 1]
5800 = ptrace (PTRACE_PEEKTEXT, pid,
5801 /* Coerce to a uintptr_t first to avoid potential gcc warning
5802 about coercing an 8 byte integer to a 4 byte pointer. */
5803 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5804 * sizeof (PTRACE_XFER_TYPE)),
5805 (PTRACE_TYPE_ARG4) 0);
5806 if (errno)
5807 return errno;
5808 }
5809
5810 /* Copy data to be written over corresponding part of buffer. */
5811
5812 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5813 myaddr, len);
5814
5815 /* Write the entire buffer. */
5816
5817 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5818 {
5819 errno = 0;
5820 ptrace (PTRACE_POKETEXT, pid,
5821 /* Coerce to a uintptr_t first to avoid potential gcc warning
5822 about coercing an 8 byte integer to a 4 byte pointer. */
5823 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5824 (PTRACE_TYPE_ARG4) buffer[i]);
5825 if (errno)
5826 return errno;
5827 }
5828
5829 return 0;
5830 }
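
/* Worked example, for illustration: with an 8-byte PTRACE_XFER_TYPE,
   writing 3 bytes at MEMADDR 0x1001 touches only the word at 0x1000
   (addr = 0x1001 & ~7, count = 1).  That word is peeked first, bytes
   1..3 of the buffer are overwritten with the new data, and the whole
   word is poked back, preserving the surrounding bytes 0 and 4..7.  */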
5831
5832 void
5833 linux_process_target::look_up_symbols ()
5834 {
5835 #ifdef USE_THREAD_DB
5836 struct process_info *proc = current_process ();
5837
5838 if (proc->priv->thread_db != NULL)
5839 return;
5840
5841 thread_db_init ();
5842 #endif
5843 }
5844
5845 void
5846 linux_process_target::request_interrupt ()
5847 {
5848 /* Send a SIGINT to the process group. This acts just like the user
5849 typed a ^C on the controlling terminal. */
5850 ::kill (-signal_pid, SIGINT);
5851 }
5852
5853 bool
5854 linux_process_target::supports_read_auxv ()
5855 {
5856 return true;
5857 }
5858
5859 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5860 to debugger memory starting at MYADDR. */
5861
5862 int
5863 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5864 unsigned int len)
5865 {
5866 char filename[PATH_MAX];
5867 int fd, n;
5868 int pid = lwpid_of (current_thread);
5869
5870 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5871
5872 fd = open (filename, O_RDONLY);
5873 if (fd < 0)
5874 return -1;
5875
5876 if (offset != (CORE_ADDR) 0
5877 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5878 n = -1;
5879 else
5880 n = read (fd, myaddr, len);
5881
5882 close (fd);
5883
5884 return n;
5885 }
5886
5887 /* These breakpoint and watchpoint related wrapper functions simply
5888 pass on the function call if the target has registered a
5889 corresponding function. */
5890
5891 bool
5892 linux_process_target::supports_z_point_type (char z_type)
5893 {
5894 return (the_low_target.supports_z_point_type != NULL
5895 && the_low_target.supports_z_point_type (z_type));
5896 }
5897
5898 int
5899 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5900 int size, raw_breakpoint *bp)
5901 {
5902 if (type == raw_bkpt_type_sw)
5903 return insert_memory_breakpoint (bp);
5904 else if (the_low_target.insert_point != NULL)
5905 return the_low_target.insert_point (type, addr, size, bp);
5906 else
5907 /* Unsupported (see target.h). */
5908 return 1;
5909 }
5910
5911 int
5912 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5913 int size, raw_breakpoint *bp)
5914 {
5915 if (type == raw_bkpt_type_sw)
5916 return remove_memory_breakpoint (bp);
5917 else if (the_low_target.remove_point != NULL)
5918 return the_low_target.remove_point (type, addr, size, bp);
5919 else
5920 /* Unsupported (see target.h). */
5921 return 1;
5922 }
5923
5924 /* Implement the stopped_by_sw_breakpoint target_ops
5925 method. */
5926
5927 bool
5928 linux_process_target::stopped_by_sw_breakpoint ()
5929 {
5930 struct lwp_info *lwp = get_thread_lwp (current_thread);
5931
5932 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5933 }
5934
5935 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5936 method. */
5937
5938 bool
5939 linux_process_target::supports_stopped_by_sw_breakpoint ()
5940 {
5941 return USE_SIGTRAP_SIGINFO;
5942 }
5943
5944 /* Implement the stopped_by_hw_breakpoint target_ops
5945 method. */
5946
5947 bool
5948 linux_process_target::stopped_by_hw_breakpoint ()
5949 {
5950 struct lwp_info *lwp = get_thread_lwp (current_thread);
5951
5952 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5953 }
5954
5955 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5956 method. */
5957
5958 bool
5959 linux_process_target::supports_stopped_by_hw_breakpoint ()
5960 {
5961 return USE_SIGTRAP_SIGINFO;
5962 }
5963
5964 /* Implement the supports_hardware_single_step target_ops method. */
5965
5966 bool
5967 linux_process_target::supports_hardware_single_step ()
5968 {
5969 return can_hardware_single_step ();
5970 }
5971
5972 bool
5973 linux_process_target::supports_software_single_step ()
5974 {
5975 return can_software_single_step ();
5976 }
5977
5978 bool
5979 linux_process_target::stopped_by_watchpoint ()
5980 {
5981 struct lwp_info *lwp = get_thread_lwp (current_thread);
5982
5983 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5984 }
5985
5986 CORE_ADDR
5987 linux_process_target::stopped_data_address ()
5988 {
5989 struct lwp_info *lwp = get_thread_lwp (current_thread);
5990
5991 return lwp->stopped_data_address;
5992 }
5993
5994 /* This is only used for targets that define PT_TEXT_ADDR,
5995 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5996 the target has different ways of acquiring this information, like
5997 loadmaps. */
5998
5999 bool
6000 linux_process_target::supports_read_offsets ()
6001 {
6002 #ifdef SUPPORTS_READ_OFFSETS
6003 return true;
6004 #else
6005 return false;
6006 #endif
6007 }
6008
6009 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6010 to tell gdb about. */
6011
6012 int
6013 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6014 {
6015 #ifdef SUPPORTS_READ_OFFSETS
6016 unsigned long text, text_end, data;
6017 int pid = lwpid_of (current_thread);
6018
6019 errno = 0;
6020
6021 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6022 (PTRACE_TYPE_ARG4) 0);
6023 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6024 (PTRACE_TYPE_ARG4) 0);
6025 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6026 (PTRACE_TYPE_ARG4) 0);
6027
6028 if (errno == 0)
6029 {
6030 /* Both text and data offsets produced at compile-time (and so
6031 used by gdb) are relative to the beginning of the program,
6032 with the data segment immediately following the text segment.
6033 However, the actual runtime layout in memory may put the data
6034 somewhere else, so when we send gdb a data base-address, we
6035 use the real data base address and subtract the compile-time
6036 data base-address from it (which is just the length of the
6037 text segment). BSS immediately follows data in both
6038 cases. */
6039 *text_p = text;
6040 *data_p = data - (text_end - text);
6041
6042 return 1;
6043 }
6044 return 0;
6045 #else
6046 gdb_assert_not_reached ("target op read_offsets not supported");
6047 #endif
6048 }
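
/* Worked example, for illustration: suppose PT_TEXT_ADDR yields 0x8000,
   PT_TEXT_END_ADDR yields 0xa000 and PT_DATA_ADDR yields 0x40000000.
   The text segment is 0x2000 bytes long, so the reported data base is
   0x40000000 - 0x2000 = 0x3fffe000; when gdb later adds the
   compile-time data offset (which begins at the text length, 0x2000),
   it arrives back at the real runtime address 0x40000000.  */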
6049
6050 bool
6051 linux_process_target::supports_get_tls_address ()
6052 {
6053 #ifdef USE_THREAD_DB
6054 return true;
6055 #else
6056 return false;
6057 #endif
6058 }
6059
6060 int
6061 linux_process_target::get_tls_address (thread_info *thread,
6062 CORE_ADDR offset,
6063 CORE_ADDR load_module,
6064 CORE_ADDR *address)
6065 {
6066 #ifdef USE_THREAD_DB
6067 return thread_db_get_tls_address (thread, offset, load_module, address);
6068 #else
6069 return -1;
6070 #endif
6071 }
6072
6073 bool
6074 linux_process_target::supports_qxfer_osdata ()
6075 {
6076 return true;
6077 }
6078
6079 int
6080 linux_process_target::qxfer_osdata (const char *annex,
6081 unsigned char *readbuf,
6082 unsigned const char *writebuf,
6083 CORE_ADDR offset, int len)
6084 {
6085 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6086 }
6087
6088 /* Convert a native/host siginfo object into/from the siginfo in the
6089 layout of the inferior's architecture.  */
6090
6091 static void
6092 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6093 {
6094 int done = 0;
6095
6096 if (the_low_target.siginfo_fixup != NULL)
6097 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6098
6099 /* If there was no callback, or the callback didn't do anything,
6100 then just do a straight memcpy. */
6101 if (!done)
6102 {
6103 if (direction == 1)
6104 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6105 else
6106 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6107 }
6108 }
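
/* Direction convention, for illustration: DIRECTION == 0 converts the
   native siginfo just fetched with PTRACE_GETSIGINFO into the
   inferior's layout (native -> inferior), while DIRECTION == 1
   converts a buffer written by GDB back to the native layout before
   PTRACE_SETSIGINFO (inferior -> native).  See qxfer_siginfo below.  */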
6109
6110 bool
6111 linux_process_target::supports_qxfer_siginfo ()
6112 {
6113 return true;
6114 }
6115
6116 int
6117 linux_process_target::qxfer_siginfo (const char *annex,
6118 unsigned char *readbuf,
6119 unsigned const char *writebuf,
6120 CORE_ADDR offset, int len)
6121 {
6122 int pid;
6123 siginfo_t siginfo;
6124 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6125
6126 if (current_thread == NULL)
6127 return -1;
6128
6129 pid = lwpid_of (current_thread);
6130
6131 if (debug_threads)
6132 debug_printf ("%s siginfo for lwp %d.\n",
6133 readbuf != NULL ? "Reading" : "Writing",
6134 pid);
6135
6136 if (offset >= sizeof (siginfo))
6137 return -1;
6138
6139 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6140 return -1;
6141
6142 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6143 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6144 inferior with a 64-bit GDBSERVER should look the same as debugging it
6145 with a 32-bit GDBSERVER, we need to convert it. */
6146 siginfo_fixup (&siginfo, inf_siginfo, 0);
6147
6148 if (offset + len > sizeof (siginfo))
6149 len = sizeof (siginfo) - offset;
6150
6151 if (readbuf != NULL)
6152 memcpy (readbuf, inf_siginfo + offset, len);
6153 else
6154 {
6155 memcpy (inf_siginfo + offset, writebuf, len);
6156
6157 /* Convert back to ptrace layout before flushing it out. */
6158 siginfo_fixup (&siginfo, inf_siginfo, 1);
6159
6160 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6161 return -1;
6162 }
6163
6164 return len;
6165 }
6166
6167 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6168 it lets us notice when children change state; and it acts as the
6169 handler for the sigsuspend in my_waitpid.  */
6170
6171 static void
6172 sigchld_handler (int signo)
6173 {
6174 int old_errno = errno;
6175
6176 if (debug_threads)
6177 {
6178 do
6179 {
6180 /* Use the async signal safe debug function. */
6181 if (debug_write ("sigchld_handler\n",
6182 sizeof ("sigchld_handler\n") - 1) < 0)
6183 break; /* just ignore */
6184 } while (0);
6185 }
6186
6187 if (target_is_async_p ())
6188 async_file_mark (); /* trigger a linux_wait */
6189
6190 errno = old_errno;
6191 }
6192
6193 bool
6194 linux_process_target::supports_non_stop ()
6195 {
6196 return true;
6197 }
6198
6199 bool
6200 linux_process_target::async (bool enable)
6201 {
6202 bool previous = target_is_async_p ();
6203
6204 if (debug_threads)
6205 debug_printf ("linux_async (%d), previous=%d\n",
6206 enable, previous);
6207
6208 if (previous != enable)
6209 {
6210 sigset_t mask;
6211 sigemptyset (&mask);
6212 sigaddset (&mask, SIGCHLD);
6213
6214 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6215
6216 if (enable)
6217 {
6218 if (pipe (linux_event_pipe) == -1)
6219 {
6220 linux_event_pipe[0] = -1;
6221 linux_event_pipe[1] = -1;
6222 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6223
6224 warning ("creating event pipe failed.");
6225 return previous;
6226 }
6227
6228 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6229 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6230
6231 /* Register the event loop handler. */
6232 add_file_handler (linux_event_pipe[0],
6233 handle_target_event, NULL);
6234
6235 /* Always trigger a linux_wait. */
6236 async_file_mark ();
6237 }
6238 else
6239 {
6240 delete_file_handler (linux_event_pipe[0]);
6241
6242 close (linux_event_pipe[0]);
6243 close (linux_event_pipe[1]);
6244 linux_event_pipe[0] = -1;
6245 linux_event_pipe[1] = -1;
6246 }
6247
6248 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6249 }
6250
6251 return previous;
6252 }
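
/* For illustration only: the "self-pipe" pattern configured above, in
   isolation.  A signal handler writes one byte to the pipe's write
   end, and the event loop watches the read end, which turns an async
   signal into an ordinary file-descriptor event.  The names below are
   hypothetical stand-ins; the real counterparts are linux_event_pipe
   and async_file_mark.  */

static int event_pipe_sketch[2];

static void
mark_event_sketch (void)
{
  int save_errno = errno;

  if (write (event_pipe_sketch[1], "+", 1) < 0)
    {
      /* The pipe is full, so an event is already pending; nothing
	 more to do.  */
    }

  errno = save_errno;
}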
6253
6254 int
6255 linux_process_target::start_non_stop (bool nonstop)
6256 {
6257 /* Register or unregister from event-loop accordingly. */
6258 target_async (nonstop);
6259
6260 if (target_is_async_p () != (nonstop != false))
6261 return -1;
6262
6263 return 0;
6264 }
6265
6266 bool
6267 linux_process_target::supports_multi_process ()
6268 {
6269 return true;
6270 }
6271
6272 /* Check if fork events are supported. */
6273
6274 bool
6275 linux_process_target::supports_fork_events ()
6276 {
6277 return linux_supports_tracefork ();
6278 }
6279
6280 /* Check if vfork events are supported. */
6281
6282 bool
6283 linux_process_target::supports_vfork_events ()
6284 {
6285 return linux_supports_tracefork ();
6286 }
6287
6288 /* Check if exec events are supported. */
6289
6290 bool
6291 linux_process_target::supports_exec_events ()
6292 {
6293 return linux_supports_traceexec ();
6294 }
6295
6296 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6297 ptrace flags for all inferiors. This is in case the new GDB connection
6298 doesn't support the same set of events that the previous one did. */
6299
6300 void
6301 linux_process_target::handle_new_gdb_connection ()
6302 {
6303 /* Request that all the lwps reset their ptrace options. */
6304 for_each_thread ([] (thread_info *thread)
6305 {
6306 struct lwp_info *lwp = get_thread_lwp (thread);
6307
6308 if (!lwp->stopped)
6309 {
6310 /* Stop the lwp so we can modify its ptrace options. */
6311 lwp->must_set_ptrace_flags = 1;
6312 linux_stop_lwp (lwp);
6313 }
6314 else
6315 {
6316 /* Already stopped; go ahead and set the ptrace options. */
6317 struct process_info *proc = find_process_pid (pid_of (thread));
6318 int options = linux_low_ptrace_options (proc->attached);
6319
6320 linux_enable_event_reporting (lwpid_of (thread), options);
6321 lwp->must_set_ptrace_flags = 0;
6322 }
6323 });
6324 }
6325
6326 int
6327 linux_process_target::handle_monitor_command (char *mon)
6328 {
6329 #ifdef USE_THREAD_DB
6330 return thread_db_handle_monitor_command (mon);
6331 #else
6332 return 0;
6333 #endif
6334 }
6335
6336 int
6337 linux_process_target::core_of_thread (ptid_t ptid)
6338 {
6339 return linux_common_core_of_thread (ptid);
6340 }
6341
6342 bool
6343 linux_process_target::supports_disable_randomization ()
6344 {
6345 #ifdef HAVE_PERSONALITY
6346 return true;
6347 #else
6348 return false;
6349 #endif
6350 }
6351
6352 bool
6353 linux_process_target::supports_agent ()
6354 {
6355 return true;
6356 }
6357
6358 bool
6359 linux_process_target::supports_range_stepping ()
6360 {
6361 if (can_software_single_step ())
6362 return true;
6363 if (the_low_target.supports_range_stepping == NULL)
6364 return false;
6365
6366 return (*the_low_target.supports_range_stepping) ();
6367 }
6368
6369 bool
6370 linux_process_target::supports_pid_to_exec_file ()
6371 {
6372 return true;
6373 }
6374
6375 char *
6376 linux_process_target::pid_to_exec_file (int pid)
6377 {
6378 return linux_proc_pid_to_exec_file (pid);
6379 }
6380
6381 bool
6382 linux_process_target::supports_multifs ()
6383 {
6384 return true;
6385 }
6386
6387 int
6388 linux_process_target::multifs_open (int pid, const char *filename,
6389 int flags, mode_t mode)
6390 {
6391 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6392 }
6393
6394 int
6395 linux_process_target::multifs_unlink (int pid, const char *filename)
6396 {
6397 return linux_mntns_unlink (pid, filename);
6398 }
6399
6400 ssize_t
6401 linux_process_target::multifs_readlink (int pid, const char *filename,
6402 char *buf, size_t bufsiz)
6403 {
6404 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6405 }
6406
6407 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6408 struct target_loadseg
6409 {
6410 /* Core address to which the segment is mapped. */
6411 Elf32_Addr addr;
6412 /* VMA recorded in the program header. */
6413 Elf32_Addr p_vaddr;
6414 /* Size of this segment in memory. */
6415 Elf32_Word p_memsz;
6416 };
6417
6418 # if defined PT_GETDSBT
6419 struct target_loadmap
6420 {
6421 /* Protocol version number, must be zero. */
6422 Elf32_Word version;
6423 /* Pointer to the DSBT table, its size, and the DSBT index. */
6424 unsigned *dsbt_table;
6425 unsigned dsbt_size, dsbt_index;
6426 /* Number of segments in this map. */
6427 Elf32_Word nsegs;
6428 /* The actual memory map. */
6429 struct target_loadseg segs[/*nsegs*/];
6430 };
6431 # define LINUX_LOADMAP PT_GETDSBT
6432 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6433 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6434 # else
6435 struct target_loadmap
6436 {
6437 /* Protocol version number, must be zero. */
6438 Elf32_Half version;
6439 /* Number of segments in this map. */
6440 Elf32_Half nsegs;
6441 /* The actual memory map. */
6442 struct target_loadseg segs[/*nsegs*/];
6443 };
6444 # define LINUX_LOADMAP PTRACE_GETFDPIC
6445 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6446 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6447 # endif
6448
6449 bool
6450 linux_process_target::supports_read_loadmap ()
6451 {
6452 return true;
6453 }
6454
6455 int
6456 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6457 unsigned char *myaddr, unsigned int len)
6458 {
6459 int pid = lwpid_of (current_thread);
6460 int addr = -1;
6461 struct target_loadmap *data = NULL;
6462 unsigned int actual_length, copy_length;
6463
6464 if (strcmp (annex, "exec") == 0)
6465 addr = (int) LINUX_LOADMAP_EXEC;
6466 else if (strcmp (annex, "interp") == 0)
6467 addr = (int) LINUX_LOADMAP_INTERP;
6468 else
6469 return -1;
6470
6471 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6472 return -1;
6473
6474 if (data == NULL)
6475 return -1;
6476
6477 actual_length = sizeof (struct target_loadmap)
6478 + sizeof (struct target_loadseg) * data->nsegs;
6479
6480 if (offset < 0 || offset > actual_length)
6481 return -1;
6482
6483 copy_length = actual_length - offset < len ? actual_length - offset : len;
6484 memcpy (myaddr, (char *) data + offset, copy_length);
6485 return copy_length;
6486 }
6487 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6488
6489 void
6490 linux_process_target::process_qsupported (char **features, int count)
6491 {
6492 if (the_low_target.process_qsupported != NULL)
6493 the_low_target.process_qsupported (features, count);
6494 }
6495
6496 bool
6497 linux_process_target::supports_catch_syscall ()
6498 {
6499 return (the_low_target.get_syscall_trapinfo != NULL
6500 && linux_supports_tracesysgood ());
6501 }
6502
6503 int
6504 linux_process_target::get_ipa_tdesc_idx ()
6505 {
6506 if (the_low_target.get_ipa_tdesc_idx == NULL)
6507 return 0;
6508
6509 return (*the_low_target.get_ipa_tdesc_idx) ();
6510 }
6511
6512 bool
6513 linux_process_target::supports_tracepoints ()
6514 {
6515 if (the_low_target.supports_tracepoints == NULL)
6516 return false;
6517
6518 return (*the_low_target.supports_tracepoints) ();
6519 }
6520
6521 CORE_ADDR
6522 linux_process_target::read_pc (regcache *regcache)
6523 {
6524 if (the_low_target.get_pc == NULL)
6525 return 0;
6526
6527 return (*the_low_target.get_pc) (regcache);
6528 }
6529
6530 void
6531 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6532 {
6533 gdb_assert (the_low_target.set_pc != NULL);
6534
6535 (*the_low_target.set_pc) (regcache, pc);
6536 }
6537
6538 bool
6539 linux_process_target::supports_thread_stopped ()
6540 {
6541 return true;
6542 }
6543
6544 bool
6545 linux_process_target::thread_stopped (thread_info *thread)
6546 {
6547 return get_thread_lwp (thread)->stopped;
6548 }
6549
6550 /* This exposes stop-all-threads functionality to other modules. */
6551
6552 void
6553 linux_process_target::pause_all (bool freeze)
6554 {
6555 stop_all_lwps (freeze, NULL);
6556 }
6557
6558 /* This exposes unstop-all-threads functionality to other gdbserver
6559 modules. */
6560
6561 void
6562 linux_process_target::unpause_all (bool unfreeze)
6563 {
6564 unstop_all_lwps (unfreeze, NULL);
6565 }
6566
6567 int
6568 linux_process_target::prepare_to_access_memory ()
6569 {
6570 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6571 running LWP.  */
6572 if (non_stop)
6573 target_pause_all (true);
6574 return 0;
6575 }
6576
6577 void
6578 linux_process_target::done_accessing_memory ()
6579 {
6580 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6581 running LWP.  */
6582 if (non_stop)
6583 target_unpause_all (true);
6584 }
6585
6586 bool
6587 linux_process_target::supports_fast_tracepoints ()
6588 {
6589 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6590 }
6591
6592 int
6593 linux_process_target::install_fast_tracepoint_jump_pad
6594 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6595 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6596 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6597 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6598 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6599 char *err)
6600 {
6601 return (*the_low_target.install_fast_tracepoint_jump_pad)
6602 (tpoint, tpaddr, collector, lockaddr, orig_size,
6603 jump_entry, trampoline, trampoline_size,
6604 jjump_pad_insn, jjump_pad_insn_size,
6605 adjusted_insn_addr, adjusted_insn_addr_end,
6606 err);
6607 }
6608
6609 emit_ops *
6610 linux_process_target::emit_ops ()
6611 {
6612 if (the_low_target.emit_ops != NULL)
6613 return (*the_low_target.emit_ops) ();
6614 else
6615 return NULL;
6616 }
6617
6618 int
6619 linux_process_target::get_min_fast_tracepoint_insn_len ()
6620 {
6621 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6622 }
6623
6624 /* Extract the inferior's AT_PHDR (&phdr) and AT_PHNUM (num_phdr) values from its auxv.  Return 0 on success.  */
6625
6626 static int
6627 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6628 CORE_ADDR *phdr_memaddr, int *num_phdr)
6629 {
6630 char filename[PATH_MAX];
6631 int fd;
6632 const int auxv_size = is_elf64
6633 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6634 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6635
6636 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6637
6638 fd = open (filename, O_RDONLY);
6639 if (fd < 0)
6640 return 1;
6641
6642 *phdr_memaddr = 0;
6643 *num_phdr = 0;
6644 while (read (fd, buf, auxv_size) == auxv_size
6645 && (*phdr_memaddr == 0 || *num_phdr == 0))
6646 {
6647 if (is_elf64)
6648 {
6649 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6650
6651 switch (aux->a_type)
6652 {
6653 case AT_PHDR:
6654 *phdr_memaddr = aux->a_un.a_val;
6655 break;
6656 case AT_PHNUM:
6657 *num_phdr = aux->a_un.a_val;
6658 break;
6659 }
6660 }
6661 else
6662 {
6663 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6664
6665 switch (aux->a_type)
6666 {
6667 case AT_PHDR:
6668 *phdr_memaddr = aux->a_un.a_val;
6669 break;
6670 case AT_PHNUM:
6671 *num_phdr = aux->a_un.a_val;
6672 break;
6673 }
6674 }
6675 }
6676
6677 close (fd);
6678
6679 if (*phdr_memaddr == 0 || *num_phdr == 0)
6680 {
6681 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6682 "phdr_memaddr = %ld, phdr_num = %d",
6683 (long) *phdr_memaddr, *num_phdr);
6684 return 2;
6685 }
6686
6687 return 0;
6688 }
6689
6690 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6691
6692 static CORE_ADDR
6693 get_dynamic (const int pid, const int is_elf64)
6694 {
6695 CORE_ADDR phdr_memaddr, relocation;
6696 int num_phdr, i;
6697 unsigned char *phdr_buf;
6698 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6699
6700 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6701 return 0;
6702
6703 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6704 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6705
6706 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6707 return 0;
6708
6709 /* Compute relocation: it is expected to be 0 for "regular" executables,
6710 non-zero for PIE ones. */
6711 relocation = -1;
6712 for (i = 0; relocation == -1 && i < num_phdr; i++)
6713 if (is_elf64)
6714 {
6715 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6716
6717 if (p->p_type == PT_PHDR)
6718 relocation = phdr_memaddr - p->p_vaddr;
6719 }
6720 else
6721 {
6722 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6723
6724 if (p->p_type == PT_PHDR)
6725 relocation = phdr_memaddr - p->p_vaddr;
6726 }
6727
6728 if (relocation == -1)
6729 {
6730 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6731 real world executables, including PIE executables, always have
6732 PT_PHDR present.  PT_PHDR is absent from some shared libraries and
6733 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6734 provides DT_DEBUG anyway (fpc binaries are statically linked).
6735
6736 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6737
6738 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6739
6740 return 0;
6741 }
6742
6743 for (i = 0; i < num_phdr; i++)
6744 {
6745 if (is_elf64)
6746 {
6747 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6748
6749 if (p->p_type == PT_DYNAMIC)
6750 return p->p_vaddr + relocation;
6751 }
6752 else
6753 {
6754 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6755
6756 if (p->p_type == PT_DYNAMIC)
6757 return p->p_vaddr + relocation;
6758 }
6759 }
6760
6761 return 0;
6762 }
6763
6764 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6765 can be 0 if the inferior does not yet have the library list initialized.
6766 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6767 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6768
6769 static CORE_ADDR
6770 get_r_debug (const int pid, const int is_elf64)
6771 {
6772 CORE_ADDR dynamic_memaddr;
6773 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6774 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6775 CORE_ADDR map = -1;
6776
6777 dynamic_memaddr = get_dynamic (pid, is_elf64);
6778 if (dynamic_memaddr == 0)
6779 return map;
6780
6781 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6782 {
6783 if (is_elf64)
6784 {
6785 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6786 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6787 union
6788 {
6789 Elf64_Xword map;
6790 unsigned char buf[sizeof (Elf64_Xword)];
6791 }
6792 rld_map;
6793 #endif
6794 #ifdef DT_MIPS_RLD_MAP
6795 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6796 {
6797 if (linux_read_memory (dyn->d_un.d_val,
6798 rld_map.buf, sizeof (rld_map.buf)) == 0)
6799 return rld_map.map;
6800 else
6801 break;
6802 }
6803 #endif /* DT_MIPS_RLD_MAP */
6804 #ifdef DT_MIPS_RLD_MAP_REL
6805 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6806 {
6807 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6808 rld_map.buf, sizeof (rld_map.buf)) == 0)
6809 return rld_map.map;
6810 else
6811 break;
6812 }
6813 #endif /* DT_MIPS_RLD_MAP_REL */
6814
6815 if (dyn->d_tag == DT_DEBUG && map == -1)
6816 map = dyn->d_un.d_val;
6817
6818 if (dyn->d_tag == DT_NULL)
6819 break;
6820 }
6821 else
6822 {
6823 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6824 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6825 union
6826 {
6827 Elf32_Word map;
6828 unsigned char buf[sizeof (Elf32_Word)];
6829 }
6830 rld_map;
6831 #endif
6832 #ifdef DT_MIPS_RLD_MAP
6833 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6834 {
6835 if (linux_read_memory (dyn->d_un.d_val,
6836 rld_map.buf, sizeof (rld_map.buf)) == 0)
6837 return rld_map.map;
6838 else
6839 break;
6840 }
6841 #endif /* DT_MIPS_RLD_MAP */
6842 #ifdef DT_MIPS_RLD_MAP_REL
6843 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6844 {
6845 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6846 rld_map.buf, sizeof (rld_map.buf)) == 0)
6847 return rld_map.map;
6848 else
6849 break;
6850 }
6851 #endif /* DT_MIPS_RLD_MAP_REL */
6852
6853 if (dyn->d_tag == DT_DEBUG && map == -1)
6854 map = dyn->d_un.d_val;
6855
6856 if (dyn->d_tag == DT_NULL)
6857 break;
6858 }
6859
6860 dynamic_memaddr += dyn_size;
6861 }
6862
6863 return map;
6864 }
6865
6866 /* Read one pointer from MEMADDR in the inferior. */
6867
6868 static int
6869 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6870 {
6871 int ret;
6872
6873 /* Go through a union so this works on either big or little endian
6874 hosts, when the inferior's pointer size is smaller than the size
6875 of CORE_ADDR. It is assumed the inferior's endianness is the
6876 same as the superior's.  */
6877 union
6878 {
6879 CORE_ADDR core_addr;
6880 unsigned int ui;
6881 unsigned char uc;
6882 } addr;
6883
6884 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6885 if (ret == 0)
6886 {
6887 if (ptr_size == sizeof (CORE_ADDR))
6888 *ptr = addr.core_addr;
6889 else if (ptr_size == sizeof (unsigned int))
6890 *ptr = addr.ui;
6891 else
6892 gdb_assert_not_reached ("unhandled pointer size");
6893 }
6894 return ret;
6895 }
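
/* For illustration: with a 64-bit gdbserver (8-byte CORE_ADDR)
   debugging a 32-bit inferior, PTR_SIZE is 4, so the four bytes land
   at the start of the union and are then read back through ADDR.ui,
   the member whose size is exactly 4; interpreting the bytes through a
   member that matches PTR_SIZE is what keeps the conversion correct on
   both big and little endian hosts.  */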
6896
6897 bool
6898 linux_process_target::supports_qxfer_libraries_svr4 ()
6899 {
6900 return true;
6901 }
6902
6903 struct link_map_offsets
6904 {
6905 /* Offset and size of r_debug.r_version. */
6906 int r_version_offset;
6907
6908 /* Offset and size of r_debug.r_map. */
6909 int r_map_offset;
6910
6911 /* Offset to l_addr field in struct link_map. */
6912 int l_addr_offset;
6913
6914 /* Offset to l_name field in struct link_map. */
6915 int l_name_offset;
6916
6917 /* Offset to l_ld field in struct link_map. */
6918 int l_ld_offset;
6919
6920 /* Offset to l_next field in struct link_map. */
6921 int l_next_offset;
6922
6923 /* Offset to l_prev field in struct link_map. */
6924 int l_prev_offset;
6925 };
6926
6927 /* Construct qXfer:libraries-svr4:read reply. */
6928
6929 int
6930 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6931 unsigned char *readbuf,
6932 unsigned const char *writebuf,
6933 CORE_ADDR offset, int len)
6934 {
6935 struct process_info_private *const priv = current_process ()->priv;
6936 char filename[PATH_MAX];
6937 int pid, is_elf64;
6938
6939 static const struct link_map_offsets lmo_32bit_offsets =
6940 {
6941 0, /* r_version offset. */
6942 4, /* r_debug.r_map offset. */
6943 0, /* l_addr offset in link_map. */
6944 4, /* l_name offset in link_map. */
6945 8, /* l_ld offset in link_map. */
6946 12, /* l_next offset in link_map. */
6947 16 /* l_prev offset in link_map. */
6948 };
6949
6950 static const struct link_map_offsets lmo_64bit_offsets =
6951 {
6952 0, /* r_version offset. */
6953 8, /* r_debug.r_map offset. */
6954 0, /* l_addr offset in link_map. */
6955 8, /* l_name offset in link_map. */
6956 16, /* l_ld offset in link_map. */
6957 24, /* l_next offset in link_map. */
6958 32 /* l_prev offset in link_map. */
6959 };
6960 const struct link_map_offsets *lmo;
6961 unsigned int machine;
6962 int ptr_size;
6963 CORE_ADDR lm_addr = 0, lm_prev = 0;
6964 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6965 int header_done = 0;
6966
6967 if (writebuf != NULL)
6968 return -2;
6969 if (readbuf == NULL)
6970 return -1;
6971
6972 pid = lwpid_of (current_thread);
6973 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6974 is_elf64 = elf_64_file_p (filename, &machine);
6975 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6976 ptr_size = is_elf64 ? 8 : 4;
6977
6978 while (annex[0] != '\0')
6979 {
6980 const char *sep;
6981 CORE_ADDR *addrp;
6982 int name_len;
6983
6984 sep = strchr (annex, '=');
6985 if (sep == NULL)
6986 break;
6987
6988 name_len = sep - annex;
6989 if (name_len == 5 && startswith (annex, "start"))
6990 addrp = &lm_addr;
6991 else if (name_len == 4 && startswith (annex, "prev"))
6992 addrp = &lm_prev;
6993 else
6994 {
6995 annex = strchr (sep, ';');
6996 if (annex == NULL)
6997 break;
6998 annex++;
6999 continue;
7000 }
7001
7002 annex = decode_address_to_semicolon (addrp, sep + 1);
7003 }
7004
7005 if (lm_addr == 0)
7006 {
7007 int r_version = 0;
7008
7009 if (priv->r_debug == 0)
7010 priv->r_debug = get_r_debug (pid, is_elf64);
7011
7012 /* We failed to find DT_DEBUG.  This situation will not change
7013 for this inferior - do not retry it.  Report it to GDB as
7014 E01; see GDB's solib-svr4.c for the reasons.  */
7015 if (priv->r_debug == (CORE_ADDR) -1)
7016 return -1;
7017
7018 if (priv->r_debug != 0)
7019 {
7020 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7021 (unsigned char *) &r_version,
7022 sizeof (r_version)) != 0
7023 || r_version != 1)
7024 {
7025 warning ("unexpected r_debug version %d", r_version);
7026 }
7027 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7028 &lm_addr, ptr_size) != 0)
7029 {
7030 warning ("unable to read r_map from 0x%lx",
7031 (long) priv->r_debug + lmo->r_map_offset);
7032 }
7033 }
7034 }
7035
7036 std::string document = "<library-list-svr4 version=\"1.0\"";
7037
7038 while (lm_addr
7039 && read_one_ptr (lm_addr + lmo->l_name_offset,
7040 &l_name, ptr_size) == 0
7041 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7042 &l_addr, ptr_size) == 0
7043 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7044 &l_ld, ptr_size) == 0
7045 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7046 &l_prev, ptr_size) == 0
7047 && read_one_ptr (lm_addr + lmo->l_next_offset,
7048 &l_next, ptr_size) == 0)
7049 {
7050 unsigned char libname[PATH_MAX];
7051
7052 if (lm_prev != l_prev)
7053 {
7054 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7055 (long) lm_prev, (long) l_prev);
7056 break;
7057 }
7058
7059 /* Ignore the first entry even if it has a valid name, as the first
7060 entry corresponds to the main executable.  The first entry should not
7061 be skipped if the dynamic loader was loaded late by a static executable
7062 (see the solib-svr4.c parameter ignore_first).  But in that case the
7063 main executable does not have PT_DYNAMIC present, and this function
7064 has already exited above due to a failed get_r_debug.  */
7065 if (lm_prev == 0)
7066 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7067 else
7068 {
7069 /* Not checking for error because reading may stop before
7070 we've got PATH_MAX worth of characters. */
7071 libname[0] = '\0';
7072 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7073 libname[sizeof (libname) - 1] = '\0';
7074 if (libname[0] != '\0')
7075 {
7076 if (!header_done)
7077 {
7078 /* Terminate `<library-list-svr4'. */
7079 document += '>';
7080 header_done = 1;
7081 }
7082
7083 string_appendf (document, "<library name=\"");
7084 xml_escape_text_append (&document, (char *) libname);
7085 string_appendf (document, "\" lm=\"0x%lx\" "
7086 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7087 (unsigned long) lm_addr, (unsigned long) l_addr,
7088 (unsigned long) l_ld);
7089 }
7090 }
7091
7092 lm_prev = lm_addr;
7093 lm_addr = l_next;
7094 }
7095
7096 if (!header_done)
7097 {
7098 /* Empty list; terminate `<library-list-svr4'. */
7099 document += "/>";
7100 }
7101 else
7102 document += "</library-list-svr4>";
7103
7104 int document_len = document.length ();
7105 if (offset < document_len)
7106 document_len -= offset;
7107 else
7108 document_len = 0;
7109 if (len > document_len)
7110 len = document_len;
7111
7112 memcpy (readbuf, document.data () + offset, len);
7113
7114 return len;
7115 }
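
/* For illustration: the annex parsed above is a list of NAME=HEXADDR
   pairs, each terminated by a semicolon, e.g. "start=7f0000001000;prev=0;".
   "start" seeds LM_ADDR (the link-map entry at which to resume the
   walk) and "prev" seeds LM_PREV (the expected l_prev back-pointer),
   which lets GDB fetch a long library list incrementally.  */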
7116
7117 #ifdef HAVE_LINUX_BTRACE
7118
7119 btrace_target_info *
7120 linux_process_target::enable_btrace (ptid_t ptid,
7121 const btrace_config *conf)
7122 {
7123 return linux_enable_btrace (ptid, conf);
7124 }
7125
7126 /* See to_disable_btrace target method. */
7127
7128 int
7129 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7130 {
7131 enum btrace_error err;
7132
7133 err = linux_disable_btrace (tinfo);
7134 return (err == BTRACE_ERR_NONE ? 0 : -1);
7135 }
7136
7137 /* Encode an Intel Processor Trace configuration. */
7138
7139 static void
7140 linux_low_encode_pt_config (struct buffer *buffer,
7141 const struct btrace_data_pt_config *config)
7142 {
7143 buffer_grow_str (buffer, "<pt-config>\n");
7144
7145 switch (config->cpu.vendor)
7146 {
7147 case CV_INTEL:
7148 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7149 "model=\"%u\" stepping=\"%u\"/>\n",
7150 config->cpu.family, config->cpu.model,
7151 config->cpu.stepping);
7152 break;
7153
7154 default:
7155 break;
7156 }
7157
7158 buffer_grow_str (buffer, "</pt-config>\n");
7159 }
7160
7161 /* Encode a raw buffer. */
7162
7163 static void
7164 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7165 unsigned int size)
7166 {
7167 if (size == 0)
7168 return;
7169
7170 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7171 buffer_grow_str (buffer, "<raw>\n");
7172
7173 while (size-- > 0)
7174 {
7175 char elem[2];
7176
7177 elem[0] = tohex ((*data >> 4) & 0xf);
7178 elem[1] = tohex (*data++ & 0xf);
7179
7180 buffer_grow (buffer, elem, 2);
7181 }
7182
7183 buffer_grow_str (buffer, "</raw>\n");
7184 }
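
/* For illustration: each byte becomes two hex digits, high nibble
   first, so a 2-byte buffer { 0xde, 0xad } is emitted as the four
   characters "dead" inside the <raw> element.  */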
7185
7186 /* See to_read_btrace target method. */
7187
7188 int
7189 linux_process_target::read_btrace (btrace_target_info *tinfo,
7190 buffer *buffer,
7191 enum btrace_read_type type)
7192 {
7193 struct btrace_data btrace;
7194 enum btrace_error err;
7195
7196 err = linux_read_btrace (&btrace, tinfo, type);
7197 if (err != BTRACE_ERR_NONE)
7198 {
7199 if (err == BTRACE_ERR_OVERFLOW)
7200 buffer_grow_str0 (buffer, "E.Overflow.");
7201 else
7202 buffer_grow_str0 (buffer, "E.Generic Error.");
7203
7204 return -1;
7205 }
7206
7207 switch (btrace.format)
7208 {
7209 case BTRACE_FORMAT_NONE:
7210 buffer_grow_str0 (buffer, "E.No Trace.");
7211 return -1;
7212
7213 case BTRACE_FORMAT_BTS:
7214 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7215 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7216
7217 for (const btrace_block &block : *btrace.variant.bts.blocks)
7218 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7219 paddress (block.begin), paddress (block.end));
7220
7221 buffer_grow_str0 (buffer, "</btrace>\n");
7222 break;
7223
7224 case BTRACE_FORMAT_PT:
7225 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7226 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7227 buffer_grow_str (buffer, "<pt>\n");
7228
7229 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7230
7231 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7232 btrace.variant.pt.size);
7233
7234 buffer_grow_str (buffer, "</pt>\n");
7235 buffer_grow_str0 (buffer, "</btrace>\n");
7236 break;
7237
7238 default:
7239 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7240 return -1;
7241 }
7242
7243 return 0;
7244 }
7245
7246 /* See to_btrace_conf target method. */
7247
7248 int
7249 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7250 buffer *buffer)
7251 {
7252 const struct btrace_config *conf;
7253
7254 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7255 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7256
7257 conf = linux_btrace_conf (tinfo);
7258 if (conf != NULL)
7259 {
7260 switch (conf->format)
7261 {
7262 case BTRACE_FORMAT_NONE:
7263 break;
7264
7265 case BTRACE_FORMAT_BTS:
7266 buffer_xml_printf (buffer, "<bts");
7267 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7268 buffer_xml_printf (buffer, " />\n");
7269 break;
7270
7271 case BTRACE_FORMAT_PT:
7272 buffer_xml_printf (buffer, "<pt");
7273 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7274 buffer_xml_printf (buffer, "/>\n");
7275 break;
7276 }
7277 }
7278
7279 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7280 return 0;
7281 }
7282 #endif /* HAVE_LINUX_BTRACE */
7283
7284 /* See nat/linux-nat.h. */
7285
7286 ptid_t
7287 current_lwp_ptid (void)
7288 {
7289 return ptid_of (current_thread);
7290 }
7291
7292 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7293
7294 int
7295 linux_process_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7296 {
7297 if (the_low_target.breakpoint_kind_from_pc != NULL)
7298 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7299 else
7300 return process_stratum_target::breakpoint_kind_from_pc (pcptr);
7301 }
7302
7303 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7304
7305 const gdb_byte *
7306 linux_process_target::sw_breakpoint_from_kind (int kind, int *size)
7307 {
7308 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7309
7310 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7311 }
7312
7313 /* Implementation of the target_ops method
7314 "breakpoint_kind_from_current_state". */
7315
7316 int
7317 linux_process_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7318 {
7319 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7320 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7321 else
7322 return breakpoint_kind_from_pc (pcptr);
7323 }
7324
7325 const char *
7326 linux_process_target::thread_name (ptid_t thread)
7327 {
7328 return linux_proc_tid_get_name (thread);
7329 }
7330
7331 #if USE_THREAD_DB
7332 bool
7333 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7334 int *handle_len)
7335 {
7336 return thread_db_thread_handle (ptid, handle, handle_len);
7337 }
7338 #endif
7339
7340 /* Default implementation of linux_target_ops method "set_pc" for a
7341 32-bit pc register which is literally named "pc".  */
7342
7343 void
7344 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7345 {
7346 uint32_t newpc = pc;
7347
7348 supply_register_by_name (regcache, "pc", &newpc);
7349 }
7350
7351 /* Default implementation of linux_target_ops method "get_pc" for a
7352 32-bit pc register which is literally named "pc".  */
7353
7354 CORE_ADDR
7355 linux_get_pc_32bit (struct regcache *regcache)
7356 {
7357 uint32_t pc;
7358
7359 collect_register_by_name (regcache, "pc", &pc);
7360 if (debug_threads)
7361 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7362 return pc;
7363 }
7364
7365 /* Default implementation of linux_target_ops method "set_pc" for a
7366 64-bit pc register which is literally named "pc".  */
7367
7368 void
7369 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7370 {
7371 uint64_t newpc = pc;
7372
7373 supply_register_by_name (regcache, "pc", &newpc);
7374 }
7375
7376 /* Default implementation of linux_target_ops method "get_pc" for a
7377 64-bit pc register which is literally named "pc".  */
7378
7379 CORE_ADDR
7380 linux_get_pc_64bit (struct regcache *regcache)
7381 {
7382 uint64_t pc;
7383
7384 collect_register_by_name (regcache, "pc", &pc);
7385 if (debug_threads)
7386 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7387 return pc;
7388 }
7389
7390 /* See linux-low.h. */
7391
7392 int
7393 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7394 {
7395 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7396 int offset = 0;
7397
7398 gdb_assert (wordsize == 4 || wordsize == 8);
7399
7400 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7401 {
7402 if (wordsize == 4)
7403 {
7404 uint32_t *data_p = (uint32_t *) data;
7405 if (data_p[0] == match)
7406 {
7407 *valp = data_p[1];
7408 return 1;
7409 }
7410 }
7411 else
7412 {
7413 uint64_t *data_p = (uint64_t *) data;
7414 if (data_p[0] == match)
7415 {
7416 *valp = data_p[1];
7417 return 1;
7418 }
7419 }
7420
7421 offset += 2 * wordsize;
7422 }
7423
7424 return 0;
7425 }
7426
7427 /* See linux-low.h. */
7428
7429 CORE_ADDR
7430 linux_get_hwcap (int wordsize)
7431 {
7432 CORE_ADDR hwcap = 0;
7433 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7434 return hwcap;
7435 }
7436
7437 /* See linux-low.h. */
7438
7439 CORE_ADDR
7440 linux_get_hwcap2 (int wordsize)
7441 {
7442 CORE_ADDR hwcap2 = 0;
7443 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7444 return hwcap2;
7445 }
7446
7447 #ifdef HAVE_LINUX_REGSETS
7448 void
7449 initialize_regsets_info (struct regsets_info *info)
7450 {
7451 for (info->num_regsets = 0;
7452 info->regsets[info->num_regsets].size >= 0;
7453 info->num_regsets++)
7454 ;
7455 }
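
/* For illustration: the regsets array is expected to end in a sentinel
   entry whose size field is negative, so the loop above simply counts
   entries until it reaches that sentinel.  */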
7456 #endif
7457
7458 void
7459 initialize_low (void)
7460 {
7461 struct sigaction sigchld_action;
7462
7463 memset (&sigchld_action, 0, sizeof (sigchld_action));
7464 set_target_ops (the_linux_target);
7465
7466 linux_ptrace_init_warnings ();
7467 linux_proc_init_warnings ();
7468
7469 sigchld_action.sa_handler = sigchld_handler;
7470 sigemptyset (&sigchld_action.sa_mask);
7471 sigchld_action.sa_flags = SA_RESTART;
7472 sigaction (SIGCHLD, &sigchld_action, NULL);
7473
7474 initialize_low_arch ();
7475
7476 linux_check_ptrace_features ();
7477 }