gdbserver: turn target op '{supports_}stopped_by_hw_breakpoint' into a method

gdbserver/linux-low.cc

/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
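
/* Illustrative sketch (editorial addition, compiled out): how the
   stopped_pids list is meant to be used by a consumer of fork/clone
   events.  handle_extended_wait below follows this exact pattern;
   NEW_PID here is a hypothetical parameter.  */
#if 0
static void
example_collect_new_child (int new_pid)
{
  int status;

  /* If the new child's initial SIGSTOP was already collected by the
     main wait loop, it is sitting in stopped_pids; otherwise block
     until it arrives.  */
  if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
    {
      if (my_waitpid (new_pid, &status, __WALL) != new_pid)
	warning ("unexpected wait result for new child %d", new_pid);
    }

  /* STATUS now holds the child's initial stop status.  */
}
#endif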

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
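
/* Sketch of the self-pipe pattern behind linux_event_pipe (an
   illustration, compiled out; the real async_file_mark and
   async_file_flush live further down in this file).  In async mode a
   byte is written to the pipe's write end when an event is pending,
   waking up the event loop's select/poll on the read end.  The read
   end is assumed to have been made non-blocking, so draining it
   cannot hang.  */
#if 0
static void
example_mark_event_pipe (void)
{
  if (target_is_async_p ())
    write (linux_event_pipe[1], "+", 1);	/* Errors ignored.  */
}

static void
example_flush_event_pipe (void)
{
  char buf;

  /* Drain all queued wakeup bytes (pipe assumed O_NONBLOCK).  */
  while (read (linux_event_pipe[0], &buf, 1) > 0)
    continue;
}
#endif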

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF header, 0 if it is ELF but not
   64-bit, and -1 if it is not an ELF header at all.  Set *MACHINE to
   the header's e_machine, or to EM_NONE for a non-ELF header.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
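
/* Hedged usage sketch (compiled out): a caller can use the predicate
   above to choose between the 32-bit and 64-bit auxv entry layouts
   defined near the top of this file before parsing /proc/PID/auxv.
   EXAMPLE_PID is hypothetical.  */
#if 0
static size_t
example_auxv_entry_size (int example_pid)
{
  unsigned int machine;

  return (linux_pid_exe_is_elf_64_file (example_pid, &machine) == 1
	  ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t));
}
#endif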

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      the_target->pt->mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
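
/* Sketch of the classic PTRACE_TRACEME sequence that fork_inferior
   drives with the callback above (illustration only, compiled out;
   fork_inferior itself performs the fork/exec and error handling).
   The child requests tracing before exec'ing, so the parent's first
   waitpid reports a stop at the exec boundary.  EXAMPLE_PROGRAM is
   hypothetical.  */
#if 0
static pid_t
example_trace_me_fork (const char *example_program)
{
  pid_t child = fork ();

  if (child == 0)
    {
      linux_ptrace_fun ();	/* PTRACE_TRACEME + setpgid.  */
      execl (example_program, example_program, (char *) NULL);
      _exit (127);		/* Only reached if exec failed.  */
    }

  int status;
  my_waitpid (child, &status, 0);	/* Collect the initial stop.  */
  return child;
}
#endif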

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
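
/* Illustration of the attach handshake described above (a sketch,
   compiled out, not gdbserver's actual code path): PTRACE_ATTACH
   queues a SIGSTOP, and the tracer must collect the resulting stop
   with waitpid before issuing further ptrace requests on the LWP.
   EXAMPLE_LWPID is hypothetical.  */
#if 0
static int
example_attach_and_wait (int example_lwpid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, example_lwpid,
	      (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  /* Only after the initial SIGSTOP is collected is the LWP in a
     ptrace stop.  */
  if (my_waitpid (example_lwpid, &status, __WALL) != example_lwpid
      || !WIFSTOPPED (status))
    warning ("unexpected initial stop for LWP %d", example_lwpid);

  return 0;
}
#endif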

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS if waitpid actually filled it in.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return false;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return false;

  if (!lwp_resumed (lp))
    return false;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return false;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

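/* Usage sketch for iterate_over_lwps (illustrative, compiled out):
   find the first stopped LWP of a process by passing a callback that
   returns nonzero on a match.  EXAMPLE_PID is hypothetical.  */
#if 0
static struct lwp_info *
example_first_stopped_lwp (int example_pid)
{
  return iterate_over_lwps (ptid_t (example_pid),
			    [] (struct lwp_info *lwp)
			    {
			      return lwp->stopped != 0;
			    });
}
#endif
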
1860 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1861 their exits until all other threads in the group have exited. */
1862
1863 static void
1864 check_zombie_leaders (void)
1865 {
1866 for_each_process ([] (process_info *proc) {
1867 pid_t leader_pid = pid_of (proc);
1868 struct lwp_info *leader_lp;
1869
1870 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1871
1872 if (debug_threads)
1873 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1874 "num_lwps=%d, zombie=%d\n",
1875 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1876 linux_proc_pid_is_zombie (leader_pid));
1877
1878 if (leader_lp != NULL && !leader_lp->stopped
1879 /* Check if there are other threads in the group, as we may
1880 have raced with the inferior simply exiting. */
1881 && !last_thread_of_process_p (leader_pid)
1882 && linux_proc_pid_is_zombie (leader_pid))
1883 {
1884 /* A leader zombie can mean one of two things:
1885
1886 - It exited, and there's an exit status pending
1887 for us to collect, or only the leader exited (not the whole
1888 program). In the latter case, we can't waitpid the
1889 leader's exit status until all other threads are gone.
1890
1891 - There are 3 or more threads in the group, and a thread
1892 other than the leader exec'd. On an exec, the Linux
1893 kernel destroys all other threads (except the execing
1894 one) in the thread group, and resets the execing thread's
1895 tid to the tgid. No exit notification is sent for the
1896 execing thread -- from the ptracer's perspective, it
1897 appears as though the execing thread just vanishes.
1898 Until we reap all other threads except the leader and the
1899 execing thread, the leader will be zombie, and the
1900 execing thread will be in `D (disc sleep)'. As soon as
1901 all other threads are reaped, the execing thread changes
1902 its tid to the tgid, and the previous (zombie) leader
1903 vanishes, giving way to the "new" leader. We could try
1904 distinguishing the exit and exec cases, by waiting once
1905 more, and seeing if something comes out, but it doesn't
1906 sound useful. The previous leader _does_ go away, and
1907 we'll re-add the new one once we see the exec event
1908 (which is just the same as what would happen if the
1909 previous leader did exit voluntarily before some other
1910 thread execs). */
1911
1912 if (debug_threads)
1913 debug_printf ("CZL: Thread group leader %d zombie "
1914 "(it exited, or another thread execd).\n",
1915 leader_pid);
1916
1917 delete_lwp (leader_lp);
1918 }
1919 });
1920 }
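
/* A sketch of the exec race described above, with hypothetical pids
   (leader 100, non-leader 101):

     101 calls execve ()     -> kernel destroys the other non-leader
                                threads
     /proc/100/status: "Z"   -> leader looks zombie, but waitpid
                                reports nothing for it
     peers reaped            -> 101 takes over tid 100, and a
                                PTRACE_EVENT_EXEC is reported for
                                pid 100

   Deleting the stale leader LWP here lets the later exec event
   re-add it cleanly.  */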
1921
1922 /* Callback for `find_thread'. Returns the first LWP that is not
1923 stopped. */
1924
1925 static bool
1926 not_stopped_callback (thread_info *thread, ptid_t filter)
1927 {
1928 if (!thread->id.matches (filter))
1929 return false;
1930
1931 lwp_info *lwp = get_thread_lwp (thread);
1932
1933 return !lwp->stopped;
1934 }
1935
1936 /* Increment LWP's suspend count. */
1937
1938 static void
1939 lwp_suspended_inc (struct lwp_info *lwp)
1940 {
1941 lwp->suspended++;
1942
1943 if (debug_threads && lwp->suspended > 4)
1944 {
1945 struct thread_info *thread = get_lwp_thread (lwp);
1946
1947 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1948 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1949 }
1950 }
1951
1952 /* Decrement LWP's suspend count. */
1953
1954 static void
1955 lwp_suspended_decr (struct lwp_info *lwp)
1956 {
1957 lwp->suspended--;
1958
1959 if (lwp->suspended < 0)
1960 {
1961 struct thread_info *thread = get_lwp_thread (lwp);
1962
1963 internal_error (__FILE__, __LINE__,
1964 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1965 lwp->suspended);
1966 }
1967 }
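
/* These two counters are intended to be used as a strict bracket,
   e.g. (minimal sketch):

     lwp_suspended_inc (lwp);
     ... do work while LWP must not be resumed ...
     lwp_suspended_decr (lwp);

   An unbalanced decrement trips the internal_error above; a leaked
   increment eventually triggers the "suspiciously high suspend
   count" warning in lwp_suspended_inc.  */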
1968
1969 /* This function should only be called if the LWP got a SIGTRAP.
1970
1971 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1972 event was handled, 0 otherwise. */
1973
1974 static int
1975 handle_tracepoints (struct lwp_info *lwp)
1976 {
1977 struct thread_info *tinfo = get_lwp_thread (lwp);
1978 int tpoint_related_event = 0;
1979
1980 gdb_assert (lwp->suspended == 0);
1981
1982 /* If this tracepoint hit causes a tracing stop, we'll immediately
1983 uninsert tracepoints. To do this, we temporarily pause all
1984 threads, unpatch away, and then unpause threads. We need to make
1985 sure the unpausing doesn't resume LWP too. */
1986 lwp_suspended_inc (lwp);
1987
1988 /* And we need to be sure that any all-threads-stopping doesn't try
1989 to move threads out of the jump pads, as it could deadlock the
1990 inferior (LWP could be in the jump pad, maybe even holding the
1991 lock.) */
1992
1993 /* Do any necessary step collect actions. */
1994 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1995
1996 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1997
1998 /* See if we just hit a tracepoint and do its main collect
1999 actions. */
2000 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2001
2002 lwp_suspended_decr (lwp);
2003
2004 gdb_assert (lwp->suspended == 0);
2005 gdb_assert (!stabilizing_threads
2006 || (lwp->collecting_fast_tracepoint
2007 != fast_tpoint_collect_result::not_collecting));
2008
2009 if (tpoint_related_event)
2010 {
2011 if (debug_threads)
2012 debug_printf ("got a tracepoint event\n");
2013 return 1;
2014 }
2015
2016 return 0;
2017 }
2018
2019 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2020 collection status. */
2021
2022 static fast_tpoint_collect_result
2023 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2024 struct fast_tpoint_collect_status *status)
2025 {
2026 CORE_ADDR thread_area;
2027 struct thread_info *thread = get_lwp_thread (lwp);
2028
2029 if (the_low_target.get_thread_area == NULL)
2030 return fast_tpoint_collect_result::not_collecting;
2031
2032 /* Get the thread area address. This is used to recognize which
2033 thread is which when tracing with the in-process agent library.
2034 We don't read anything from the address, and treat it as opaque;
2035 it's the address itself that we assume is unique per-thread. */
2036 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2037 return fast_tpoint_collect_result::not_collecting;
2038
2039 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2040 }
2041
2042 /* The reason we resume in the caller is that we want to be able
2043 to pass lwp->status_pending as WSTAT, and we need to clear
2044 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2045 refuses to resume. */
2046
2047 static int
2048 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2049 {
2050 struct thread_info *saved_thread;
2051
2052 saved_thread = current_thread;
2053 current_thread = get_lwp_thread (lwp);
2054
2055 if ((wstat == NULL
2056 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2057 && supports_fast_tracepoints ()
2058 && agent_loaded_p ())
2059 {
2060 struct fast_tpoint_collect_status status;
2061
2062 if (debug_threads)
2063 debug_printf ("Checking whether LWP %ld needs to move out of the "
2064 "jump pad.\n",
2065 lwpid_of (current_thread));
2066
2067 fast_tpoint_collect_result r
2068 = linux_fast_tracepoint_collecting (lwp, &status);
2069
2070 if (wstat == NULL
2071 || (WSTOPSIG (*wstat) != SIGILL
2072 && WSTOPSIG (*wstat) != SIGFPE
2073 && WSTOPSIG (*wstat) != SIGSEGV
2074 && WSTOPSIG (*wstat) != SIGBUS))
2075 {
2076 lwp->collecting_fast_tracepoint = r;
2077
2078 if (r != fast_tpoint_collect_result::not_collecting)
2079 {
2080 if (r == fast_tpoint_collect_result::before_insn
2081 && lwp->exit_jump_pad_bkpt == NULL)
2082 {
2083 /* Haven't executed the original instruction yet.
2084 Set breakpoint there, and wait till it's hit,
2085 then single-step until exiting the jump pad. */
2086 lwp->exit_jump_pad_bkpt
2087 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2088 }
2089
2090 if (debug_threads)
2091 debug_printf ("Checking whether LWP %ld needs to move out of "
2092 "the jump pad...it does\n",
2093 lwpid_of (current_thread));
2094 current_thread = saved_thread;
2095
2096 return 1;
2097 }
2098 }
2099 else
2100 {
2101 /* If we get a synchronous signal while collecting, *and*
2102 while executing the (relocated) original instruction,
2103 reset the PC to point at the tpoint address, before
2104 reporting to GDB. Otherwise, it's an IPA lib bug: just
2105 report the signal to GDB, and pray for the best. */
2106
2107 lwp->collecting_fast_tracepoint
2108 = fast_tpoint_collect_result::not_collecting;
2109
2110 if (r != fast_tpoint_collect_result::not_collecting
2111 && (status.adjusted_insn_addr <= lwp->stop_pc
2112 && lwp->stop_pc < status.adjusted_insn_addr_end))
2113 {
2114 siginfo_t info;
2115 struct regcache *regcache;
2116
2117 /* The si_addr on a few signals references the address
2118 of the faulting instruction. Adjust that as
2119 well. */
2120 if ((WSTOPSIG (*wstat) == SIGILL
2121 || WSTOPSIG (*wstat) == SIGFPE
2122 || WSTOPSIG (*wstat) == SIGBUS
2123 || WSTOPSIG (*wstat) == SIGSEGV)
2124 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2125 (PTRACE_TYPE_ARG3) 0, &info) == 0
2126 /* Final check just to make sure we don't clobber
2127 the siginfo of non-kernel-sent signals. */
2128 && (uintptr_t) info.si_addr == lwp->stop_pc)
2129 {
2130 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2131 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2132 (PTRACE_TYPE_ARG3) 0, &info);
2133 }
2134
2135 regcache = get_thread_regcache (current_thread, 1);
2136 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2137 lwp->stop_pc = status.tpoint_addr;
2138
2139 /* Cancel any fast tracepoint lock this thread was
2140 holding. */
2141 force_unlock_trace_buffer ();
2142 }
2143
2144 if (lwp->exit_jump_pad_bkpt != NULL)
2145 {
2146 if (debug_threads)
2147 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2148 "stopping all threads momentarily.\n");
2149
2150 stop_all_lwps (1, lwp);
2151
2152 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2153 lwp->exit_jump_pad_bkpt = NULL;
2154
2155 unstop_all_lwps (1, lwp);
2156
2157 gdb_assert (lwp->suspended >= 0);
2158 }
2159 }
2160 }
2161
2162 if (debug_threads)
2163 debug_printf ("Checking whether LWP %ld needs to move out of the "
2164 "jump pad...no\n",
2165 lwpid_of (current_thread));
2166
2167 current_thread = saved_thread;
2168 return 0;
2169 }
2170
2171 /* Enqueue one signal in the "signals to report later when out of the
2172 jump pad" list. */
2173
2174 static void
2175 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2176 {
2177 struct pending_signals *p_sig;
2178 struct thread_info *thread = get_lwp_thread (lwp);
2179
2180 if (debug_threads)
2181 debug_printf ("Deferring signal %d for LWP %ld.\n",
2182 WSTOPSIG (*wstat), lwpid_of (thread));
2183
2184 if (debug_threads)
2185 {
2186 struct pending_signals *sig;
2187
2188 for (sig = lwp->pending_signals_to_report;
2189 sig != NULL;
2190 sig = sig->prev)
2191 debug_printf (" Already queued %d\n",
2192 sig->signal);
2193
2194 debug_printf (" (no more currently queued signals)\n");
2195 }
2196
2197 /* Don't enqueue non-RT signals if they are already in the deferred
2198 queue. (SIGSTOP is the easiest signal to see ending up here
2199 twice.) */
2200 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2201 {
2202 struct pending_signals *sig;
2203
2204 for (sig = lwp->pending_signals_to_report;
2205 sig != NULL;
2206 sig = sig->prev)
2207 {
2208 if (sig->signal == WSTOPSIG (*wstat))
2209 {
2210 if (debug_threads)
2211 debug_printf ("Not requeuing already queued non-RT signal %d"
2212 " for LWP %ld\n",
2213 sig->signal,
2214 lwpid_of (thread));
2215 return;
2216 }
2217 }
2218 }
2219
2220 p_sig = XCNEW (struct pending_signals);
2221 p_sig->prev = lwp->pending_signals_to_report;
2222 p_sig->signal = WSTOPSIG (*wstat);
2223
2224 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2225 &p_sig->info);
2226
2227 lwp->pending_signals_to_report = p_sig;
2228 }
2229
2230 /* Dequeue one signal from the "signals to report later when out of
2231 the jump pad" list. */
2232
2233 static int
2234 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2235 {
2236 struct thread_info *thread = get_lwp_thread (lwp);
2237
2238 if (lwp->pending_signals_to_report != NULL)
2239 {
2240 struct pending_signals **p_sig;
2241
2242 p_sig = &lwp->pending_signals_to_report;
2243 while ((*p_sig)->prev != NULL)
2244 p_sig = &(*p_sig)->prev;
2245
2246 *wstat = W_STOPCODE ((*p_sig)->signal);
2247 if ((*p_sig)->info.si_signo != 0)
2248 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2249 &(*p_sig)->info);
2250 free (*p_sig);
2251 *p_sig = NULL;
2252
2253 if (debug_threads)
2254 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2255 WSTOPSIG (*wstat), lwpid_of (thread));
2256
2257 if (debug_threads)
2258 {
2259 struct pending_signals *sig;
2260
2261 for (sig = lwp->pending_signals_to_report;
2262 sig != NULL;
2263 sig = sig->prev)
2264 debug_printf (" Still queued %d\n",
2265 sig->signal);
2266
2267 debug_printf (" (no more queued signals)\n");
2268 }
2269
2270 return 1;
2271 }
2272
2273 return 0;
2274 }
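
/* Note the queue discipline: enqueue_one_deferred_signal pushes at
   the head, while dequeue walks the ->prev chain to its tail, so
   deferred signals are re-reported in FIFO order. E.g., if SIGUSR1
   arrived before SIGUSR2, the list is

     head: USR2 -> USR1 -> NULL

   and dequeue returns USR1 first, then USR2.  */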
2275
2276 /* Fetch the possibly triggered data watchpoint info and store it in
2277 CHILD.
2278
2279 On some archs, like x86, that use debug registers to set
2280 watchpoints, it's possible that the way to know which watched
2281 address trapped is to check the register that is used to select
2282 which address to watch. The problem is, between setting the watchpoint
2283 and reading back which data address trapped, the user may change
2284 the set of watchpoints, and, as a consequence, GDB changes the
2285 debug registers in the inferior. To avoid reading back a stale
2286 stopped-data-address when that happens, we cache in LP the fact
2287 that a watchpoint trapped, and the corresponding data address, as
2288 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2289 registers meanwhile, we have the cached data we can rely on. */
2290
2291 static int
2292 check_stopped_by_watchpoint (struct lwp_info *child)
2293 {
2294 if (the_low_target.stopped_by_watchpoint != NULL)
2295 {
2296 struct thread_info *saved_thread;
2297
2298 saved_thread = current_thread;
2299 current_thread = get_lwp_thread (child);
2300
2301 if (the_low_target.stopped_by_watchpoint ())
2302 {
2303 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2304
2305 if (the_low_target.stopped_data_address != NULL)
2306 child->stopped_data_address
2307 = the_low_target.stopped_data_address ();
2308 else
2309 child->stopped_data_address = 0;
2310 }
2311
2312 current_thread = saved_thread;
2313 }
2314
2315 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2316 }
2317
2318 /* Return the ptrace options that we want to try to enable. */
2319
2320 static int
2321 linux_low_ptrace_options (int attached)
2322 {
2323 client_state &cs = get_client_state ();
2324 int options = 0;
2325
2326 if (!attached)
2327 options |= PTRACE_O_EXITKILL;
2328
2329 if (cs.report_fork_events)
2330 options |= PTRACE_O_TRACEFORK;
2331
2332 if (cs.report_vfork_events)
2333 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2334
2335 if (cs.report_exec_events)
2336 options |= PTRACE_O_TRACEEXEC;
2337
2338 options |= PTRACE_O_TRACESYSGOOD;
2339
2340 return options;
2341 }
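
/* For example (derived from the function above), a freshly launched
   (non-attached) inferior, with the client requesting fork and exec
   events, ends up with:

     PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXEC
       | PTRACE_O_TRACESYSGOOD

   PTRACE_O_TRACESYSGOOD is always set so that syscall stops are
   reported as SYSCALL_SIGTRAP (SIGTRAP | 0x80) and can be told
   apart from breakpoint SIGTRAPs.  */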
2342
2343 /* Do low-level handling of the event, and check if we should go on
2344 and pass it to caller code. Return the affected lwp if we are, or
2345 NULL otherwise. */
2346
2347 static struct lwp_info *
2348 linux_low_filter_event (int lwpid, int wstat)
2349 {
2350 client_state &cs = get_client_state ();
2351 struct lwp_info *child;
2352 struct thread_info *thread;
2353 int have_stop_pc = 0;
2354
2355 child = find_lwp_pid (ptid_t (lwpid));
2356
2357 /* Check for stop events reported by a process we didn't already
2358 know about - anything not already in our LWP list.
2359
2360 If we're expecting to receive stopped processes after
2361 fork, vfork, and clone events, then we'll just add the
2362 new one to our list and go back to waiting for the event
2363 to be reported - the stopped process might be returned
2364 from waitpid before or after the event is.
2365
2366 But note the case of a non-leader thread exec'ing after the
2367 leader having exited, and gone from our lists (because
2368 check_zombie_leaders deleted it). The non-leader thread
2369 changes its tid to the tgid. */
2370
2371 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2372 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2373 {
2374 ptid_t child_ptid;
2375
2376 /* A multi-thread exec after we had seen the leader exiting. */
2377 if (debug_threads)
2378 {
2379 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2380 "after exec.\n", lwpid);
2381 }
2382
2383 child_ptid = ptid_t (lwpid, lwpid, 0);
2384 child = add_lwp (child_ptid);
2385 child->stopped = 1;
2386 current_thread = child->thread;
2387 }
2388
2389 /* If we didn't find a process, one of two things presumably happened:
2390 - A process we started and then detached from has exited. Ignore it.
2391 - A process we are controlling has forked and the new child's stop
2392 was reported to us by the kernel. Save its PID. */
2393 if (child == NULL && WIFSTOPPED (wstat))
2394 {
2395 add_to_pid_list (&stopped_pids, lwpid, wstat);
2396 return NULL;
2397 }
2398 else if (child == NULL)
2399 return NULL;
2400
2401 thread = get_lwp_thread (child);
2402
2403 child->stopped = 1;
2404
2405 child->last_status = wstat;
2406
2407 /* Check if the thread has exited. */
2408 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2409 {
2410 if (debug_threads)
2411 debug_printf ("LLFE: %d exited.\n", lwpid);
2412
2413 if (finish_step_over (child))
2414 {
2415 /* Unsuspend all other LWPs, and set them back running again. */
2416 unsuspend_all_lwps (child);
2417 }
2418
2419 /* If there is at least one more LWP, then the exit signal was
2420 not the end of the debugged application and should be
2421 ignored, unless GDB wants to hear about thread exits. */
2422 if (cs.report_thread_events
2423 || last_thread_of_process_p (pid_of (thread)))
2424 {
2425 /* Since events are serialized to GDB core, we can't
2426 report this one right now. Leave the status pending for
2427 the next time we're able to report it. */
2428 mark_lwp_dead (child, wstat);
2429 return child;
2430 }
2431 else
2432 {
2433 delete_lwp (child);
2434 return NULL;
2435 }
2436 }
2437
2438 gdb_assert (WIFSTOPPED (wstat));
2439
2440 if (WIFSTOPPED (wstat))
2441 {
2442 struct process_info *proc;
2443
2444 /* Architecture-specific setup after inferior is running. */
2445 proc = find_process_pid (pid_of (thread));
2446 if (proc->tdesc == NULL)
2447 {
2448 if (proc->attached)
2449 {
2450 /* This needs to happen after we have attached to the
2451 inferior and it is stopped for the first time, but
2452 before we access any inferior registers. */
2453 linux_arch_setup_thread (thread);
2454 }
2455 else
2456 {
2457 /* The process is started, but GDBserver will do
2458 architecture-specific setup after the program stops at
2459 the first instruction. */
2460 child->status_pending_p = 1;
2461 child->status_pending = wstat;
2462 return child;
2463 }
2464 }
2465 }
2466
2467 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2468 {
2469 struct process_info *proc = find_process_pid (pid_of (thread));
2470 int options = linux_low_ptrace_options (proc->attached);
2471
2472 linux_enable_event_reporting (lwpid, options);
2473 child->must_set_ptrace_flags = 0;
2474 }
2475
2476 /* Always update syscall_state, even if it will be filtered later. */
2477 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2478 {
2479 child->syscall_state
2480 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2481 ? TARGET_WAITKIND_SYSCALL_RETURN
2482 : TARGET_WAITKIND_SYSCALL_ENTRY);
2483 }
2484 else
2485 {
2486 /* Almost all other ptrace-stops are known to be outside of system
2487 calls, with further exceptions in handle_extended_wait. */
2488 child->syscall_state = TARGET_WAITKIND_IGNORE;
2489 }
2490
2491 /* Be careful to not overwrite stop_pc until save_stop_reason is
2492 called. */
2493 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2494 && linux_is_extended_waitstatus (wstat))
2495 {
2496 child->stop_pc = get_pc (child);
2497 if (handle_extended_wait (&child, wstat))
2498 {
2499 /* The event has been handled, so just return without
2500 reporting it. */
2501 return NULL;
2502 }
2503 }
2504
2505 if (linux_wstatus_maybe_breakpoint (wstat))
2506 {
2507 if (save_stop_reason (child))
2508 have_stop_pc = 1;
2509 }
2510
2511 if (!have_stop_pc)
2512 child->stop_pc = get_pc (child);
2513
2514 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2515 && child->stop_expected)
2516 {
2517 if (debug_threads)
2518 debug_printf ("Expected stop.\n");
2519 child->stop_expected = 0;
2520
2521 if (thread->last_resume_kind == resume_stop)
2522 {
2523 /* We want to report the stop to the core. Treat the
2524 SIGSTOP as a normal event. */
2525 if (debug_threads)
2526 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2527 target_pid_to_str (ptid_of (thread)));
2528 }
2529 else if (stopping_threads != NOT_STOPPING_THREADS)
2530 {
2531 /* Stopping threads. We don't want this SIGSTOP to end up
2532 pending. */
2533 if (debug_threads)
2534 debug_printf ("LLW: SIGSTOP caught for %s "
2535 "while stopping threads.\n",
2536 target_pid_to_str (ptid_of (thread)));
2537 return NULL;
2538 }
2539 else
2540 {
2541 /* This is a delayed SIGSTOP. Filter out the event. */
2542 if (debug_threads)
2543 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2544 child->stepping ? "step" : "continue",
2545 target_pid_to_str (ptid_of (thread)));
2546
2547 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2548 return NULL;
2549 }
2550 }
2551
2552 child->status_pending_p = 1;
2553 child->status_pending = wstat;
2554 return child;
2555 }
2556
2557 /* Return true if THREAD is doing hardware single step. */
2558
2559 static int
2560 maybe_hw_step (struct thread_info *thread)
2561 {
2562 if (can_hardware_single_step ())
2563 return 1;
2564 else
2565 {
2566 /* GDBserver must insert single-step breakpoint for software
2567 single step. */
2568 gdb_assert (has_single_step_breakpoints (thread));
2569 return 0;
2570 }
2571 }
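
/* A typical caller pairs this with a resume, using the result as
   the step flag (sketch of the pattern used by
   resume_stopped_resumed_lwps below):

     int step = 0;
     if (thread->last_resume_kind == resume_step)
       step = maybe_hw_step (thread);
     linux_resume_one_lwp (lwp, step, GDB_SIGNAL_0, NULL);  */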
2572
2573 /* Resume LWPs that are currently stopped without any pending status
2574 to report, but are resumed from the core's perspective. */
2575
2576 static void
2577 resume_stopped_resumed_lwps (thread_info *thread)
2578 {
2579 struct lwp_info *lp = get_thread_lwp (thread);
2580
2581 if (lp->stopped
2582 && !lp->suspended
2583 && !lp->status_pending_p
2584 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2585 {
2586 int step = 0;
2587
2588 if (thread->last_resume_kind == resume_step)
2589 step = maybe_hw_step (thread);
2590
2591 if (debug_threads)
2592 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2593 target_pid_to_str (ptid_of (thread)),
2594 paddress (lp->stop_pc),
2595 step);
2596
2597 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2598 }
2599 }
2600
2601 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2602 match FILTER_PTID (leaving others pending). The PTIDs can be:
2603 minus_one_ptid, to specify any child; a pid PTID, specifying all
2604 lwps of a thread group; or a PTID representing a single lwp. Store
2605 the stop status through the status pointer WSTAT. OPTIONS is
2606 passed to the waitpid call. Return 0 if no event was found and
2607 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2608 were found. Return the PID of the stopped child otherwise. */
2609
2610 static int
2611 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2612 int *wstatp, int options)
2613 {
2614 struct thread_info *event_thread;
2615 struct lwp_info *event_child, *requested_child;
2616 sigset_t block_mask, prev_mask;
2617
2618 retry:
2619 /* N.B. event_thread points to the thread_info struct that contains
2620 event_child. Keep them in sync. */
2621 event_thread = NULL;
2622 event_child = NULL;
2623 requested_child = NULL;
2624
2625 /* Check for a lwp with a pending status. */
2626
2627 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2628 {
2629 event_thread = find_thread_in_random ([&] (thread_info *thread)
2630 {
2631 return status_pending_p_callback (thread, filter_ptid);
2632 });
2633
2634 if (event_thread != NULL)
2635 event_child = get_thread_lwp (event_thread);
2636 if (debug_threads && event_thread)
2637 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2638 }
2639 else if (filter_ptid != null_ptid)
2640 {
2641 requested_child = find_lwp_pid (filter_ptid);
2642
2643 if (stopping_threads == NOT_STOPPING_THREADS
2644 && requested_child->status_pending_p
2645 && (requested_child->collecting_fast_tracepoint
2646 != fast_tpoint_collect_result::not_collecting))
2647 {
2648 enqueue_one_deferred_signal (requested_child,
2649 &requested_child->status_pending);
2650 requested_child->status_pending_p = 0;
2651 requested_child->status_pending = 0;
2652 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2653 }
2654
2655 if (requested_child->suspended
2656 && requested_child->status_pending_p)
2657 {
2658 internal_error (__FILE__, __LINE__,
2659 "requesting an event out of a"
2660 " suspended child?");
2661 }
2662
2663 if (requested_child->status_pending_p)
2664 {
2665 event_child = requested_child;
2666 event_thread = get_lwp_thread (event_child);
2667 }
2668 }
2669
2670 if (event_child != NULL)
2671 {
2672 if (debug_threads)
2673 debug_printf ("Got an event from pending child %ld (%04x)\n",
2674 lwpid_of (event_thread), event_child->status_pending);
2675 *wstatp = event_child->status_pending;
2676 event_child->status_pending_p = 0;
2677 event_child->status_pending = 0;
2678 current_thread = event_thread;
2679 return lwpid_of (event_thread);
2680 }
2681
2682 /* But if we don't find a pending event, we'll have to wait.
2683
2684 We only enter this loop if no process has a pending wait status.
2685 Thus any action taken in response to a wait status inside this
2686 loop is responding as soon as we detect the status, not after any
2687 pending events. */
2688
2689 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2690 all signals while here. */
2691 sigfillset (&block_mask);
2692 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2693
2694 /* Always pull all events out of the kernel. We'll randomly select
2695 an event LWP out of all that have events, to prevent
2696 starvation. */
2697 while (event_child == NULL)
2698 {
2699 pid_t ret = 0;
2700
2701 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2702 quirks:
2703
2704 - If the thread group leader exits while other threads in the
2705 thread group still exist, waitpid(TGID, ...) hangs. That
2706 waitpid won't return an exit status until the other threads
2707 in the group are reaped.
2708
2709 - When a non-leader thread execs, that thread just vanishes
2710 without reporting an exit (so we'd hang if we waited for it
2711 explicitly in that case). The exec event is reported to
2712 the TGID pid. */
2713 errno = 0;
2714 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2715
2716 if (debug_threads)
2717 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2718 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2719
2720 if (ret > 0)
2721 {
2722 if (debug_threads)
2723 {
2724 debug_printf ("LLW: waitpid %ld received %s\n",
2725 (long) ret, status_to_str (*wstatp));
2726 }
2727
2728 /* Filter all events. IOW, leave all events pending. We'll
2729 randomly select an event LWP out of all that have events
2730 below. */
2731 linux_low_filter_event (ret, *wstatp);
2732 /* Retry until nothing comes out of waitpid. A single
2733 SIGCHLD can indicate more than one child stopped. */
2734 continue;
2735 }
2736
2737 /* Now that we've pulled all events out of the kernel, resume
2738 LWPs that don't have an interesting event to report. */
2739 if (stopping_threads == NOT_STOPPING_THREADS)
2740 for_each_thread (resume_stopped_resumed_lwps);
2741
2742 /* ... and find an LWP with a status to report to the core, if
2743 any. */
2744 event_thread = find_thread_in_random ([&] (thread_info *thread)
2745 {
2746 return status_pending_p_callback (thread, filter_ptid);
2747 });
2748
2749 if (event_thread != NULL)
2750 {
2751 event_child = get_thread_lwp (event_thread);
2752 *wstatp = event_child->status_pending;
2753 event_child->status_pending_p = 0;
2754 event_child->status_pending = 0;
2755 break;
2756 }
2757
2758 /* Check for zombie thread group leaders. Those can't be reaped
2759 until all other threads in the thread group are. */
2760 check_zombie_leaders ();
2761
2762 auto not_stopped = [&] (thread_info *thread)
2763 {
2764 return not_stopped_callback (thread, wait_ptid);
2765 };
2766
2767 /* If there are no resumed children left in the set of LWPs we
2768 want to wait for, bail. We can't just block in
2769 waitpid/sigsuspend, because lwps might have been left stopped
2770 in trace-stop state, and we'd be stuck forever waiting for
2771 their status to change (which would only happen if we resumed
2772 them). Even if WNOHANG is set, this return code is preferred
2773 over 0 (below), as it is more detailed. */
2774 if (find_thread (not_stopped) == NULL)
2775 {
2776 if (debug_threads)
2777 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2778 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2779 return -1;
2780 }
2781
2782 /* No interesting event to report to the caller. */
2783 if ((options & WNOHANG))
2784 {
2785 if (debug_threads)
2786 debug_printf ("WNOHANG set, no event found\n");
2787
2788 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2789 return 0;
2790 }
2791
2792 /* Block until we get an event reported with SIGCHLD. */
2793 if (debug_threads)
2794 debug_printf ("sigsuspend'ing\n");
2795
2796 sigsuspend (&prev_mask);
2797 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2798 goto retry;
2799 }
2800
2801 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2802
2803 current_thread = event_thread;
2804
2805 return lwpid_of (event_thread);
2806 }
2807
2808 /* Wait for an event from child(ren) PTID. PTIDs can be:
2809 minus_one_ptid, to specify any child; a pid PTID, specifying all
2810 lwps of a thread group; or a PTID representing a single lwp. Store
2811 the stop status through the status pointer WSTAT. OPTIONS is
2812 passed to the waitpid call. Return 0 if no event was found and
2813 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2814 were found. Return the PID of the stopped child otherwise. */
2815
2816 static int
2817 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2818 {
2819 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2820 }
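
/* Passing the same ptid twice means "wait on this set, report from
   this set". The filtered variant matters when the two sets differ;
   e.g. (hypothetical call), to pull events out of all children but
   only return those of one LWP, leaving the rest pending:

     linux_wait_for_event_filtered (minus_one_ptid, ptid_of (thread),
                                    &wstat, __WALL);  */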
2821
2822 /* Select one LWP out of those that have events pending. */
2823
2824 static void
2825 select_event_lwp (struct lwp_info **orig_lp)
2826 {
2827 struct thread_info *event_thread = NULL;
2828
2829 /* In all-stop, give preference to the LWP that is being
2830 single-stepped. There will be at most one, and it's the LWP that
2831 the core is most interested in. If we didn't do this, then we'd
2832 have to handle pending step SIGTRAPs somehow in case the core
2833 later continues the previously-stepped thread, otherwise we'd
2834 report the pending SIGTRAP, and the core, not having stepped the
2835 thread, wouldn't understand what the trap was for, and therefore
2836 would report it to the user as a random signal. */
2837 if (!non_stop)
2838 {
2839 event_thread = find_thread ([] (thread_info *thread)
2840 {
2841 lwp_info *lp = get_thread_lwp (thread);
2842
2843 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2844 && thread->last_resume_kind == resume_step
2845 && lp->status_pending_p);
2846 });
2847
2848 if (event_thread != NULL)
2849 {
2850 if (debug_threads)
2851 debug_printf ("SEL: Select single-step %s\n",
2852 target_pid_to_str (ptid_of (event_thread)));
2853 }
2854 }
2855 if (event_thread == NULL)
2856 {
2857 /* No single-stepping LWP. Select one at random, out of those
2858 which have had events. */
2859
2860 event_thread = find_thread_in_random ([&] (thread_info *thread)
2861 {
2862 lwp_info *lp = get_thread_lwp (thread);
2863
2864 /* Only resumed LWPs that have an event pending. */
2865 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2866 && lp->status_pending_p);
2867 });
2868 }
2869
2870 if (event_thread != NULL)
2871 {
2872 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2873
2874 /* Switch the event LWP. */
2875 *orig_lp = event_lp;
2876 }
2877 }
2878
2879 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2880 non-NULL. */
2881
2882 static void
2883 unsuspend_all_lwps (struct lwp_info *except)
2884 {
2885 for_each_thread ([&] (thread_info *thread)
2886 {
2887 lwp_info *lwp = get_thread_lwp (thread);
2888
2889 if (lwp != except)
2890 lwp_suspended_decr (lwp);
2891 });
2892 }
2893
2894 static void move_out_of_jump_pad_callback (thread_info *thread);
2895 static bool stuck_in_jump_pad_callback (thread_info *thread);
2896 static bool lwp_running (thread_info *thread);
2897 static ptid_t linux_wait_1 (ptid_t ptid,
2898 struct target_waitstatus *ourstatus,
2899 int target_options);
2900
2901 /* Stabilize threads (move out of jump pads).
2902
2903 If a thread is midway collecting a fast tracepoint, we need to
2904 finish the collection and move it out of the jump pad before
2905 reporting the signal.
2906
2907 This avoids recursion while collecting (when a signal arrives
2908 midway, and the signal handler itself collects), which would trash
2909 the trace buffer. In case the user set a breakpoint in a signal
2910 handler, this avoids the backtrace showing the jump pad, etc..
2911 Most importantly, there are certain things we can't do safely if
2912 threads are stopped in a jump pad (or in its callee's). For
2913 example:
2914
2915 - starting a new trace run. A thread still collecting the
2916 previous run could trash the trace buffer when resumed. The trace
2917 buffer control structures would have been reset but the thread had
2918 no way to tell. The thread could even be midway through memcpy'ing
2919 into the buffer, which would mean that when resumed, it would clobber
2920 the trace buffer that had been set up for the new run.
2921
2922 - we can't rewrite/reuse the jump pads for new tracepoints
2923 safely. Say you do tstart while a thread is stopped midway through
2924 collecting. When the thread is later resumed, it finishes the
2925 collection and returns to the jump pad, to execute the original
2926 instruction that was under the tracepoint jump at the time the
2927 older run had been started. If the jump pad had since been
2928 rewritten for something else in the new run, the thread would now
2929 execute wrong or random instructions. */
2930
2931 static void
2932 linux_stabilize_threads (void)
2933 {
2934 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2935
2936 if (thread_stuck != NULL)
2937 {
2938 if (debug_threads)
2939 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2940 lwpid_of (thread_stuck));
2941 return;
2942 }
2943
2944 thread_info *saved_thread = current_thread;
2945
2946 stabilizing_threads = 1;
2947
2948 /* Kick 'em all. */
2949 for_each_thread (move_out_of_jump_pad_callback);
2950
2951 /* Loop until all are stopped out of the jump pads. */
2952 while (find_thread (lwp_running) != NULL)
2953 {
2954 struct target_waitstatus ourstatus;
2955 struct lwp_info *lwp;
2956 int wstat;
2957
2958 /* Note that we go through the full wait event loop. While
2959 moving threads out of jump pad, we need to be able to step
2960 over internal breakpoints and such. */
2961 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2962
2963 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2964 {
2965 lwp = get_thread_lwp (current_thread);
2966
2967 /* Lock it. */
2968 lwp_suspended_inc (lwp);
2969
2970 if (ourstatus.value.sig != GDB_SIGNAL_0
2971 || current_thread->last_resume_kind == resume_stop)
2972 {
2973 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2974 enqueue_one_deferred_signal (lwp, &wstat);
2975 }
2976 }
2977 }
2978
2979 unsuspend_all_lwps (NULL);
2980
2981 stabilizing_threads = 0;
2982
2983 current_thread = saved_thread;
2984
2985 if (debug_threads)
2986 {
2987 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2988
2989 if (thread_stuck != NULL)
2990 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2991 lwpid_of (thread_stuck));
2992 }
2993 }
2994
2995 /* Convenience function that is called when the kernel reports an
2996 event that is not passed out to GDB. */
2997
2998 static ptid_t
2999 ignore_event (struct target_waitstatus *ourstatus)
3000 {
3001 /* If we got an event, there may still be others, as a single
3002 SIGCHLD can indicate more than one child stopped. This forces
3003 another target_wait call. */
3004 async_file_mark ();
3005
3006 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3007 return null_ptid;
3008 }
3009
3010 /* Convenience function that is called when the kernel reports an exit
3011 event. This decides whether to report the event to GDB as a
3012 process exit event, a thread exit event, or to suppress the
3013 event. */
3014
3015 static ptid_t
3016 filter_exit_event (struct lwp_info *event_child,
3017 struct target_waitstatus *ourstatus)
3018 {
3019 client_state &cs = get_client_state ();
3020 struct thread_info *thread = get_lwp_thread (event_child);
3021 ptid_t ptid = ptid_of (thread);
3022
3023 if (!last_thread_of_process_p (pid_of (thread)))
3024 {
3025 if (cs.report_thread_events)
3026 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3027 else
3028 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3029
3030 delete_lwp (event_child);
3031 }
3032 return ptid;
3033 }
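
/* Summarizing the decision above:

     last thread of the process       -> ourstatus left untouched
                                         (reported as a process exit)
     not last, report_thread_events   -> TARGET_WAITKIND_THREAD_EXITED
     not last, no thread events       -> TARGET_WAITKIND_IGNORE

   In both "not last" cases the exited LWP is deleted.  */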
3034
3035 /* Returns 1 if GDB is interested in any event_child syscalls. */
3036
3037 static int
3038 gdb_catching_syscalls_p (struct lwp_info *event_child)
3039 {
3040 struct thread_info *thread = get_lwp_thread (event_child);
3041 struct process_info *proc = get_thread_process (thread);
3042
3043 return !proc->syscalls_to_catch.empty ();
3044 }
3045
3046 /* Returns 1 if GDB is interested in the event_child syscall.
3047 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3048
3049 static int
3050 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3051 {
3052 int sysno;
3053 struct thread_info *thread = get_lwp_thread (event_child);
3054 struct process_info *proc = get_thread_process (thread);
3055
3056 if (proc->syscalls_to_catch.empty ())
3057 return 0;
3058
3059 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3060 return 1;
3061
3062 get_syscall_trapinfo (event_child, &sysno);
3063
3064 for (int iter : proc->syscalls_to_catch)
3065 if (iter == sysno)
3066 return 1;
3067
3068 return 0;
3069 }
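
/* Examples of the catch-set semantics above, with hypothetical
   syscall numbers:

     syscalls_to_catch = {}             -> catch nothing
     syscalls_to_catch = {ANY_SYSCALL}  -> catch every syscall
     syscalls_to_catch = {2, 3}         -> catch only syscalls 2 and 3,
                                           as resolved through
                                           get_syscall_trapinfo.  */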
3070
3071 /* Wait for process, returns status. */
3072
3073 static ptid_t
3074 linux_wait_1 (ptid_t ptid,
3075 struct target_waitstatus *ourstatus, int target_options)
3076 {
3077 client_state &cs = get_client_state ();
3078 int w;
3079 struct lwp_info *event_child;
3080 int options;
3081 int pid;
3082 int step_over_finished;
3083 int bp_explains_trap;
3084 int maybe_internal_trap;
3085 int report_to_gdb;
3086 int trace_event;
3087 int in_step_range;
3088 int any_resumed;
3089
3090 if (debug_threads)
3091 {
3092 debug_enter ();
3093 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3094 }
3095
3096 /* Translate generic target options into linux options. */
3097 options = __WALL;
3098 if (target_options & TARGET_WNOHANG)
3099 options |= WNOHANG;
3100
3101 bp_explains_trap = 0;
3102 trace_event = 0;
3103 in_step_range = 0;
3104 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3105
3106 auto status_pending_p_any = [&] (thread_info *thread)
3107 {
3108 return status_pending_p_callback (thread, minus_one_ptid);
3109 };
3110
3111 auto not_stopped = [&] (thread_info *thread)
3112 {
3113 return not_stopped_callback (thread, minus_one_ptid);
3114 };
3115
3116 /* Find a resumed LWP, if any. */
3117 if (find_thread (status_pending_p_any) != NULL)
3118 any_resumed = 1;
3119 else if (find_thread (not_stopped) != NULL)
3120 any_resumed = 1;
3121 else
3122 any_resumed = 0;
3123
3124 if (step_over_bkpt == null_ptid)
3125 pid = linux_wait_for_event (ptid, &w, options);
3126 else
3127 {
3128 if (debug_threads)
3129 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3130 target_pid_to_str (step_over_bkpt));
3131 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3132 }
3133
3134 if (pid == 0 || (pid == -1 && !any_resumed))
3135 {
3136 gdb_assert (target_options & TARGET_WNOHANG);
3137
3138 if (debug_threads)
3139 {
3140 debug_printf ("linux_wait_1 ret = null_ptid, "
3141 "TARGET_WAITKIND_IGNORE\n");
3142 debug_exit ();
3143 }
3144
3145 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3146 return null_ptid;
3147 }
3148 else if (pid == -1)
3149 {
3150 if (debug_threads)
3151 {
3152 debug_printf ("linux_wait_1 ret = null_ptid, "
3153 "TARGET_WAITKIND_NO_RESUMED\n");
3154 debug_exit ();
3155 }
3156
3157 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3158 return null_ptid;
3159 }
3160
3161 event_child = get_thread_lwp (current_thread);
3162
3163 /* linux_wait_for_event only returns an exit status for the last
3164 child of a process. Report it. */
3165 if (WIFEXITED (w) || WIFSIGNALED (w))
3166 {
3167 if (WIFEXITED (w))
3168 {
3169 ourstatus->kind = TARGET_WAITKIND_EXITED;
3170 ourstatus->value.integer = WEXITSTATUS (w);
3171
3172 if (debug_threads)
3173 {
3174 debug_printf ("linux_wait_1 ret = %s, exited with "
3175 "retcode %d\n",
3176 target_pid_to_str (ptid_of (current_thread)),
3177 WEXITSTATUS (w));
3178 debug_exit ();
3179 }
3180 }
3181 else
3182 {
3183 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3184 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3185
3186 if (debug_threads)
3187 {
3188 debug_printf ("linux_wait_1 ret = %s, terminated with "
3189 "signal %d\n",
3190 target_pid_to_str (ptid_of (current_thread)),
3191 WTERMSIG (w));
3192 debug_exit ();
3193 }
3194 }
3195
3196 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3197 return filter_exit_event (event_child, ourstatus);
3198
3199 return ptid_of (current_thread);
3200 }
3201
3202 /* If the step-over executed a breakpoint instruction, then in the
3203 case of a hardware single step it means a gdb/gdbserver breakpoint
3204 had been planted on top of a permanent breakpoint, while in the
3205 case of a software single step it may just mean that gdbserver hit
3206 the reinsert breakpoint. The PC has been adjusted by
3207 save_stop_reason to point at the breakpoint address.
3208 So in the hardware single step case, advance the PC manually past
3209 the breakpoint, and in the software single step case, advance it
3210 only if it's not the single_step_breakpoint we are hitting.
3211 This prevents the program from trapping a permanent breakpoint
3212 forever. */
3213 if (step_over_bkpt != null_ptid
3214 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3215 && (event_child->stepping
3216 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3217 {
3218 int increment_pc = 0;
3219 int breakpoint_kind = 0;
3220 CORE_ADDR stop_pc = event_child->stop_pc;
3221
3222 breakpoint_kind =
3223 the_target->breakpoint_kind_from_current_state (&stop_pc);
3224 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3225
3226 if (debug_threads)
3227 {
3228 debug_printf ("step-over for %s executed software breakpoint\n",
3229 target_pid_to_str (ptid_of (current_thread)));
3230 }
3231
3232 if (increment_pc != 0)
3233 {
3234 struct regcache *regcache
3235 = get_thread_regcache (current_thread, 1);
3236
3237 event_child->stop_pc += increment_pc;
3238 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3239
3240 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3241 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3242 }
3243 }
3244
3245 /* If this event was not handled before, and is not a SIGTRAP, we
3246 report it. SIGILL and SIGSEGV are also treated as traps in case
3247 a breakpoint is inserted at the current PC. If this target does
3248 not support internal breakpoints at all, we also report the
3249 SIGTRAP without further processing; it's of no concern to us. */
3250 maybe_internal_trap
3251 = (supports_breakpoints ()
3252 && (WSTOPSIG (w) == SIGTRAP
3253 || ((WSTOPSIG (w) == SIGILL
3254 || WSTOPSIG (w) == SIGSEGV)
3255 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3256
3257 if (maybe_internal_trap)
3258 {
3259 /* Handle anything that requires bookkeeping before deciding to
3260 report the event or continue waiting. */
3261
3262 /* First check if we can explain the SIGTRAP with an internal
3263 breakpoint, or if we should possibly report the event to GDB.
3264 Do this before anything that may remove or insert a
3265 breakpoint. */
3266 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3267
3268 /* We have a SIGTRAP, possibly a step-over dance has just
3269 finished. If so, tweak the state machine accordingly,
3270 reinsert breakpoints and delete any single-step
3271 breakpoints. */
3272 step_over_finished = finish_step_over (event_child);
3273
3274 /* Now invoke the callbacks of any internal breakpoints there. */
3275 check_breakpoints (event_child->stop_pc);
3276
3277 /* Handle tracepoint data collecting. This may overflow the
3278 trace buffer, and cause a tracing stop, removing
3279 breakpoints. */
3280 trace_event = handle_tracepoints (event_child);
3281
3282 if (bp_explains_trap)
3283 {
3284 if (debug_threads)
3285 debug_printf ("Hit a gdbserver breakpoint.\n");
3286 }
3287 }
3288 else
3289 {
3290 /* We have some other signal, possibly a step-over dance was in
3291 progress, and it should be cancelled too. */
3292 step_over_finished = finish_step_over (event_child);
3293 }
3294
3295 /* We have all the data we need. Either report the event to GDB, or
3296 resume threads and keep waiting for more. */
3297
3298 /* If we're collecting a fast tracepoint, finish the collection and
3299 move out of the jump pad before delivering a signal. See
3300 linux_stabilize_threads. */
3301
3302 if (WIFSTOPPED (w)
3303 && WSTOPSIG (w) != SIGTRAP
3304 && supports_fast_tracepoints ()
3305 && agent_loaded_p ())
3306 {
3307 if (debug_threads)
3308 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3309 "to defer or adjust it.\n",
3310 WSTOPSIG (w), lwpid_of (current_thread));
3311
3312 /* Allow debugging the jump pad itself. */
3313 if (current_thread->last_resume_kind != resume_step
3314 && maybe_move_out_of_jump_pad (event_child, &w))
3315 {
3316 enqueue_one_deferred_signal (event_child, &w);
3317
3318 if (debug_threads)
3319 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3320 WSTOPSIG (w), lwpid_of (current_thread));
3321
3322 linux_resume_one_lwp (event_child, 0, 0, NULL);
3323
3324 if (debug_threads)
3325 debug_exit ();
3326 return ignore_event (ourstatus);
3327 }
3328 }
3329
3330 if (event_child->collecting_fast_tracepoint
3331 != fast_tpoint_collect_result::not_collecting)
3332 {
3333 if (debug_threads)
3334 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3335 "Check if we're already there.\n",
3336 lwpid_of (current_thread),
3337 (int) event_child->collecting_fast_tracepoint);
3338
3339 trace_event = 1;
3340
3341 event_child->collecting_fast_tracepoint
3342 = linux_fast_tracepoint_collecting (event_child, NULL);
3343
3344 if (event_child->collecting_fast_tracepoint
3345 != fast_tpoint_collect_result::before_insn)
3346 {
3347 /* No longer need this breakpoint. */
3348 if (event_child->exit_jump_pad_bkpt != NULL)
3349 {
3350 if (debug_threads)
3351 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3352 "stopping all threads momentarily.\n");
3353
3354 /* Other running threads could hit this breakpoint.
3355 We don't handle moribund locations like GDB does,
3356 instead we always pause all threads when removing
3357 breakpoints, so that any step-over or
3358 decr_pc_after_break adjustment is always taken
3359 care of while the breakpoint is still
3360 inserted. */
3361 stop_all_lwps (1, event_child);
3362
3363 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3364 event_child->exit_jump_pad_bkpt = NULL;
3365
3366 unstop_all_lwps (1, event_child);
3367
3368 gdb_assert (event_child->suspended >= 0);
3369 }
3370 }
3371
3372 if (event_child->collecting_fast_tracepoint
3373 == fast_tpoint_collect_result::not_collecting)
3374 {
3375 if (debug_threads)
3376 debug_printf ("fast tracepoint finished "
3377 "collecting successfully.\n");
3378
3379 /* We may have a deferred signal to report. */
3380 if (dequeue_one_deferred_signal (event_child, &w))
3381 {
3382 if (debug_threads)
3383 debug_printf ("dequeued one signal.\n");
3384 }
3385 else
3386 {
3387 if (debug_threads)
3388 debug_printf ("no deferred signals.\n");
3389
3390 if (stabilizing_threads)
3391 {
3392 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3393 ourstatus->value.sig = GDB_SIGNAL_0;
3394
3395 if (debug_threads)
3396 {
3397 debug_printf ("linux_wait_1 ret = %s, stopped "
3398 "while stabilizing threads\n",
3399 target_pid_to_str (ptid_of (current_thread)));
3400 debug_exit ();
3401 }
3402
3403 return ptid_of (current_thread);
3404 }
3405 }
3406 }
3407 }
3408
3409 /* Check whether GDB would be interested in this event. */
3410
3411 /* Check if GDB is interested in this syscall. */
3412 if (WIFSTOPPED (w)
3413 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3414 && !gdb_catch_this_syscall_p (event_child))
3415 {
3416 if (debug_threads)
3417 {
3418 debug_printf ("Ignored syscall for LWP %ld.\n",
3419 lwpid_of (current_thread));
3420 }
3421
3422 linux_resume_one_lwp (event_child, event_child->stepping,
3423 0, NULL);
3424
3425 if (debug_threads)
3426 debug_exit ();
3427 return ignore_event (ourstatus);
3428 }
3429
3430 /* If GDB is not interested in this signal, don't stop other
3431 threads, and don't report it to GDB. Just resume the inferior
3432 right away. We do this for threading-related signals as well as
3433 any that GDB specifically requested we ignore. But never ignore
3434 SIGSTOP if we sent it ourselves, and do not ignore signals when
3435 stepping - they may require special handling to skip the signal
3436 handler. Also never ignore signals that could be caused by a
3437 breakpoint. */
3438 if (WIFSTOPPED (w)
3439 && current_thread->last_resume_kind != resume_step
3440 && (
3441 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3442 (current_process ()->priv->thread_db != NULL
3443 && (WSTOPSIG (w) == __SIGRTMIN
3444 || WSTOPSIG (w) == __SIGRTMIN + 1))
3445 ||
3446 #endif
3447 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3448 && !(WSTOPSIG (w) == SIGSTOP
3449 && current_thread->last_resume_kind == resume_stop)
3450 && !linux_wstatus_maybe_breakpoint (w))))
3451 {
3452 siginfo_t info, *info_p;
3453
3454 if (debug_threads)
3455 debug_printf ("Ignored signal %d for LWP %ld.\n",
3456 WSTOPSIG (w), lwpid_of (current_thread));
3457
3458 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3459 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3460 info_p = &info;
3461 else
3462 info_p = NULL;
3463
3464 if (step_over_finished)
3465 {
3466 /* We cancelled this thread's step-over above. We still
3467 need to unsuspend all other LWPs, and set them back
3468 running again while the signal handler runs. */
3469 unsuspend_all_lwps (event_child);
3470
3471 /* Enqueue the pending signal info so that proceed_all_lwps
3472 doesn't lose it. */
3473 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3474
3475 proceed_all_lwps ();
3476 }
3477 else
3478 {
3479 linux_resume_one_lwp (event_child, event_child->stepping,
3480 WSTOPSIG (w), info_p);
3481 }
3482
3483 if (debug_threads)
3484 debug_exit ();
3485
3486 return ignore_event (ourstatus);
3487 }
3488
3489 /* Note that all addresses are always "out of the step range" when
3490 there's no range to begin with. */
3491 in_step_range = lwp_in_step_range (event_child);
3492
3493 /* If GDB wanted this thread to single step, and the thread is out
3494 of the step range, we always want to report the SIGTRAP, and let
3495 GDB handle it. Watchpoints should always be reported. So should
3496 signals we can't explain. A SIGTRAP we can't explain could be a
3497 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3498 do, we're able to handle GDB breakpoints on top of internal
3499 breakpoints, by handling the internal breakpoint and still
3500 reporting the event to GDB. If we don't, we're out of luck; GDB
3501 won't see the breakpoint hit.
3502 the thread should be continuing, don't pass the trap to gdb.
3503 That indicates that we had previously finished a single-step but
3504 left the single-step pending -- see
3505 complete_ongoing_step_over. */
3506 report_to_gdb = (!maybe_internal_trap
3507 || (current_thread->last_resume_kind == resume_step
3508 && !in_step_range)
3509 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3510 || (!in_step_range
3511 && !bp_explains_trap
3512 && !trace_event
3513 && !step_over_finished
3514 && !(current_thread->last_resume_kind == resume_continue
3515 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3516 || (gdb_breakpoint_here (event_child->stop_pc)
3517 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3518 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3519 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3520
3521 run_breakpoint_commands (event_child->stop_pc);
3522
3523 /* We found no reason GDB would want us to stop. We either hit one
3524 of our own breakpoints, or finished an internal step GDB
3525 shouldn't know about. */
3526 if (!report_to_gdb)
3527 {
3528 if (debug_threads)
3529 {
3530 if (bp_explains_trap)
3531 debug_printf ("Hit a gdbserver breakpoint.\n");
3532 if (step_over_finished)
3533 debug_printf ("Step-over finished.\n");
3534 if (trace_event)
3535 debug_printf ("Tracepoint event.\n");
3536 if (lwp_in_step_range (event_child))
3537 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3538 paddress (event_child->stop_pc),
3539 paddress (event_child->step_range_start),
3540 paddress (event_child->step_range_end));
3541 }
3542
3543 /* We're not reporting this breakpoint to GDB, so apply the
3544 decr_pc_after_break adjustment to the inferior's regcache
3545 ourselves. */
3546
3547 if (the_low_target.set_pc != NULL)
3548 {
3549 struct regcache *regcache
3550 = get_thread_regcache (current_thread, 1);
3551 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3552 }
3553
3554 if (step_over_finished)
3555 {
3556 /* If we have finished stepping over a breakpoint, we've
3557 stopped and suspended all LWPs momentarily except the
3558 stepping one. This is where we resume them all again.
3559 We're going to keep waiting, so use proceed, which
3560 handles stepping over the next breakpoint. */
3561 unsuspend_all_lwps (event_child);
3562 }
3563 else
3564 {
3565 /* Remove the single-step breakpoints if any. Note that
3566 there isn't single-step breakpoint if we finished stepping
3567 over. */
3568 if (can_software_single_step ()
3569 && has_single_step_breakpoints (current_thread))
3570 {
3571 stop_all_lwps (0, event_child);
3572 delete_single_step_breakpoints (current_thread);
3573 unstop_all_lwps (0, event_child);
3574 }
3575 }
3576
3577 if (debug_threads)
3578 debug_printf ("proceeding all threads.\n");
3579 proceed_all_lwps ();
3580
3581 if (debug_threads)
3582 debug_exit ();
3583
3584 return ignore_event (ourstatus);
3585 }
3586
3587 if (debug_threads)
3588 {
3589 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3590 {
3591 std::string str
3592 = target_waitstatus_to_string (&event_child->waitstatus);
3593
3594 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3595 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3596 }
3597 if (current_thread->last_resume_kind == resume_step)
3598 {
3599 if (event_child->step_range_start == event_child->step_range_end)
3600 debug_printf ("GDB wanted to single-step, reporting event.\n");
3601 else if (!lwp_in_step_range (event_child))
3602 debug_printf ("Out of step range, reporting event.\n");
3603 }
3604 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3605 debug_printf ("Stopped by watchpoint.\n");
3606 else if (gdb_breakpoint_here (event_child->stop_pc))
3607 debug_printf ("Stopped by GDB breakpoint.\n");
3608 debug_printf ("Hit a non-gdbserver trap event.\n");
3610 }
3611
3612 /* Alright, we're going to report a stop. */
3613
3614 /* Remove single-step breakpoints. */
3615 if (can_software_single_step ())
3616 {
3617 /* Decide whether to remove single-step breakpoints. If we do,
3618 stop all lwps, so that other threads won't hit a breakpoint left
3619 in stale memory. */
3620 int remove_single_step_breakpoints_p = 0;
3621
3622 if (non_stop)
3623 {
3624 remove_single_step_breakpoints_p
3625 = has_single_step_breakpoints (current_thread);
3626 }
3627 else
3628 {
3629 /* In all-stop, a stop reply cancels all previous resume
3630 requests. Delete all single-step breakpoints. */
3631
3632 find_thread ([&] (thread_info *thread) {
3633 if (has_single_step_breakpoints (thread))
3634 {
3635 remove_single_step_breakpoints_p = 1;
3636 return true;
3637 }
3638
3639 return false;
3640 });
3641 }
3642
3643 if (remove_single_step_breakpoints_p)
3644 {
3645 /* If we remove single-step breakpoints from memory, stop all lwps,
3646 so that other threads won't hit a breakpoint left in stale
3647 memory. */
3648 stop_all_lwps (0, event_child);
3649
3650 if (non_stop)
3651 {
3652 gdb_assert (has_single_step_breakpoints (current_thread));
3653 delete_single_step_breakpoints (current_thread);
3654 }
3655 else
3656 {
3657	      for_each_thread ([] (thread_info *thread) {
3658 if (has_single_step_breakpoints (thread))
3659 delete_single_step_breakpoints (thread);
3660 });
3661 }
3662
3663 unstop_all_lwps (0, event_child);
3664 }
3665 }
3666
3667 if (!stabilizing_threads)
3668 {
3669 /* In all-stop, stop all threads. */
3670 if (!non_stop)
3671 stop_all_lwps (0, NULL);
3672
3673 if (step_over_finished)
3674 {
3675 if (!non_stop)
3676 {
3677 /* If we were doing a step-over, all other threads but
3678 the stepping one had been paused in start_step_over,
3679 with their suspend counts incremented. We don't want
3680 to do a full unstop/unpause, because we're in
3681 all-stop mode (so we want threads stopped), but we
3682 still need to unsuspend the other threads, to
3683 decrement their `suspended' count back. */
3684 unsuspend_all_lwps (event_child);
3685 }
3686 else
3687 {
3688 /* If we just finished a step-over, then all threads had
3689 been momentarily paused. In all-stop, that's fine,
3690 we want threads stopped by now anyway. In non-stop,
3691 we need to re-resume threads that GDB wanted to be
3692 running. */
3693 unstop_all_lwps (1, event_child);
3694 }
3695 }
3696
3697 /* If we're not waiting for a specific LWP, choose an event LWP
3698 from among those that have had events. Giving equal priority
3699 to all LWPs that have had events helps prevent
3700 starvation. */
3701 if (ptid == minus_one_ptid)
3702 {
3703 event_child->status_pending_p = 1;
3704 event_child->status_pending = w;
3705
3706 select_event_lwp (&event_child);
3707
3708 /* current_thread and event_child must stay in sync. */
3709 current_thread = get_lwp_thread (event_child);
3710
3711 event_child->status_pending_p = 0;
3712 w = event_child->status_pending;
3713 }
3714
3715
3716 /* Stabilize threads (move out of jump pads). */
3717 if (!non_stop)
3718 stabilize_threads ();
3719 }
3720 else
3721 {
3722 /* If we just finished a step-over, then all threads had been
3723 momentarily paused. In all-stop, that's fine, we want
3724 threads stopped by now anyway. In non-stop, we need to
3725 re-resume threads that GDB wanted to be running. */
3726 if (step_over_finished)
3727 unstop_all_lwps (1, event_child);
3728 }
3729
3730 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3731 {
3732 /* If the reported event is an exit, fork, vfork or exec, let
3733 GDB know. */
3734
3735 /* Break the unreported fork relationship chain. */
3736 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3737 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3738 {
3739 event_child->fork_relative->fork_relative = NULL;
3740 event_child->fork_relative = NULL;
3741 }
3742
3743 *ourstatus = event_child->waitstatus;
3744 /* Clear the event lwp's waitstatus since we handled it already. */
3745 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3746 }
3747 else
3748 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3749
3750 /* Now that we've selected our final event LWP, un-adjust its PC if
3751 it was a software breakpoint, and the client doesn't know we can
3752 adjust the breakpoint ourselves. */
3753 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3754 && !cs.swbreak_feature)
3755 {
3756 int decr_pc = the_low_target.decr_pc_after_break;
3757
3758 if (decr_pc != 0)
3759 {
3760 struct regcache *regcache
3761 = get_thread_regcache (current_thread, 1);
3762 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3763 }
3764 }
3765
3766 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3767 {
3768 get_syscall_trapinfo (event_child,
3769 &ourstatus->value.syscall_number);
3770 ourstatus->kind = event_child->syscall_state;
3771 }
3772 else if (current_thread->last_resume_kind == resume_stop
3773 && WSTOPSIG (w) == SIGSTOP)
3774 {
3775	      /* This thread was requested to stop by GDB with vCont;t, and
3776		 it stopped cleanly, so report it as stopped with SIG0.  The
3777		 use of SIGSTOP is an implementation detail.  */
3778 ourstatus->value.sig = GDB_SIGNAL_0;
3779 }
3780 else if (current_thread->last_resume_kind == resume_stop
3781 && WSTOPSIG (w) != SIGSTOP)
3782 {
3783	      /* This thread was requested to stop by GDB with vCont;t, but
3784		 it stopped for some other reason, so report that reason.  */
3785 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3786 }
3787 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3788 {
3789 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3790 }
3791
3792 gdb_assert (step_over_bkpt == null_ptid);
3793
3794 if (debug_threads)
3795 {
3796 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3797 target_pid_to_str (ptid_of (current_thread)),
3798 ourstatus->kind, ourstatus->value.sig);
3799 debug_exit ();
3800 }
3801
3802 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3803 return filter_exit_event (event_child, ourstatus);
3804
3805 return ptid_of (current_thread);
3806 }
3807
3808 /* Get rid of any pending event in the pipe. */
3809 static void
3810 async_file_flush (void)
3811 {
3812 int ret;
3813 char buf;
3814
3815 do
3816 ret = read (linux_event_pipe[0], &buf, 1);
3817 while (ret >= 0 || (ret == -1 && errno == EINTR));
3818 }
3819
3820 /* Put something in the pipe, so the event loop wakes up. */
3821 static void
3822 async_file_mark (void)
3823 {
3824 int ret;
3825
3826 async_file_flush ();
3827
3828 do
3829 ret = write (linux_event_pipe[1], "+", 1);
3830 while (ret == 0 || (ret == -1 && errno == EINTR));
3831
3832 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3833 be awakened anyway. */
3834 }
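
/* Together, async_file_flush and async_file_mark implement the classic
   self-pipe pattern: the read end of linux_event_pipe is watched by the
   event loop, and writing a byte here wakes the loop up so that it calls
   the wait method again.  As an illustrative sketch only (not the actual
   setup code, which lives elsewhere in this file), the wiring is roughly:

     int fds[2];
     pipe (fds);                          // becomes linux_event_pipe[0]/[1]
     fcntl (fds[0], F_SETFL, O_NONBLOCK);
     fcntl (fds[1], F_SETFL, O_NONBLOCK);
     // event loop: poll fds[0]; when readable, call the wait method.

   Both ends must be non-blocking: the flush loop relies on read
   returning -1/EAGAIN once the pipe is drained, and the mark loop relies
   on write returning -1/EAGAIN when the pipe is already full.  */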
3835
3836 ptid_t
3837 linux_process_target::wait (ptid_t ptid,
3838 target_waitstatus *ourstatus,
3839 int target_options)
3840 {
3841 ptid_t event_ptid;
3842
3843 /* Flush the async file first. */
3844 if (target_is_async_p ())
3845 async_file_flush ();
3846
3847 do
3848 {
3849 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3850 }
3851 while ((target_options & TARGET_WNOHANG) == 0
3852 && event_ptid == null_ptid
3853 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3854
3855 /* If at least one stop was reported, there may be more. A single
3856 SIGCHLD can signal more than one child stop. */
3857 if (target_is_async_p ()
3858 && (target_options & TARGET_WNOHANG) != 0
3859 && event_ptid != null_ptid)
3860 async_file_mark ();
3861
3862 return event_ptid;
3863 }
3864
3865 /* Send a signal to an LWP. */
3866
3867 static int
3868 kill_lwp (unsigned long lwpid, int signo)
3869 {
3870 int ret;
3871
3872 errno = 0;
3873 ret = syscall (__NR_tkill, lwpid, signo);
3874 if (errno == ENOSYS)
3875 {
3876 /* If tkill fails, then we are not using nptl threads, a
3877 configuration we no longer support. */
3878 perror_with_name (("tkill"));
3879 }
3880 return ret;
3881 }
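
/* A note on the syscall above: kill(2) addresses a whole thread group,
   so a SIGSTOP sent with kill could be delivered to any thread of the
   process, while tkill(2) addresses one specific kernel thread, which is
   what stopping an individual LWP requires.  For example, with LWP 1234:

     syscall (__NR_tkill, 1234, SIGSTOP);  // stops only LWP 1234
     kill (1234, SIGSTOP);                 // any thread in the group may stop

   (tgkill(2) additionally guards against thread-id reuse by also
   checking the thread-group id.)  */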
3882
3883 void
3884 linux_stop_lwp (struct lwp_info *lwp)
3885 {
3886 send_sigstop (lwp);
3887 }
3888
3889 static void
3890 send_sigstop (struct lwp_info *lwp)
3891 {
3892 int pid;
3893
3894 pid = lwpid_of (get_lwp_thread (lwp));
3895
3896	  /* If we already have a pending stop signal for this LWP, don't
3897	     send another.  */
3898 if (lwp->stop_expected)
3899 {
3900 if (debug_threads)
3901 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3902
3903 return;
3904 }
3905
3906 if (debug_threads)
3907 debug_printf ("Sending sigstop to lwp %d\n", pid);
3908
3909 lwp->stop_expected = 1;
3910 kill_lwp (pid, SIGSTOP);
3911 }
3912
3913 static void
3914 send_sigstop (thread_info *thread, lwp_info *except)
3915 {
3916 struct lwp_info *lwp = get_thread_lwp (thread);
3917
3918 /* Ignore EXCEPT. */
3919 if (lwp == except)
3920 return;
3921
3922 if (lwp->stopped)
3923 return;
3924
3925 send_sigstop (lwp);
3926 }
3927
3928 /* Increment the suspend count of an LWP, and stop it, if not stopped
3929 yet. */
3930 static void
3931 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3932 {
3933 struct lwp_info *lwp = get_thread_lwp (thread);
3934
3935 /* Ignore EXCEPT. */
3936 if (lwp == except)
3937 return;
3938
3939 lwp_suspended_inc (lwp);
3940
3941 send_sigstop (thread, except);
3942 }
3943
3944 static void
3945 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3946 {
3947 /* Store the exit status for later. */
3948 lwp->status_pending_p = 1;
3949 lwp->status_pending = wstat;
3950
3951 /* Store in waitstatus as well, as there's nothing else to process
3952 for this event. */
3953 if (WIFEXITED (wstat))
3954 {
3955 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3956 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3957 }
3958 else if (WIFSIGNALED (wstat))
3959 {
3960 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3961 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3962 }
3963
3964 /* Prevent trying to stop it. */
3965 lwp->stopped = 1;
3966
3967 /* No further stops are expected from a dead lwp. */
3968 lwp->stop_expected = 0;
3969 }
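
/* For reference, the decoding above follows the standard <sys/wait.h>
   encoding on Linux: if the LWP exited normally with exit code 1, WSTAT
   is 0x0100, so WIFEXITED is true and WEXITSTATUS yields 1; if it was
   terminated by SIGKILL (signal 9), WSTAT is 0x0009, so WIFSIGNALED is
   true and WTERMSIG yields 9.  */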
3970
3971 /* Return true if LWP has exited already, and has a pending exit event
3972 to report to GDB. */
3973
3974 static int
3975 lwp_is_marked_dead (struct lwp_info *lwp)
3976 {
3977 return (lwp->status_pending_p
3978 && (WIFEXITED (lwp->status_pending)
3979 || WIFSIGNALED (lwp->status_pending)));
3980 }
3981
3982 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3983
3984 static void
3985 wait_for_sigstop (void)
3986 {
3987 struct thread_info *saved_thread;
3988 ptid_t saved_tid;
3989 int wstat;
3990 int ret;
3991
3992 saved_thread = current_thread;
3993 if (saved_thread != NULL)
3994 saved_tid = saved_thread->id;
3995 else
3996 saved_tid = null_ptid; /* avoid bogus unused warning */
3997
3998 if (debug_threads)
3999 debug_printf ("wait_for_sigstop: pulling events\n");
4000
4001 /* Passing NULL_PTID as filter indicates we want all events to be
4002 left pending. Eventually this returns when there are no
4003 unwaited-for children left. */
4004 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4005 &wstat, __WALL);
4006 gdb_assert (ret == -1);
4007
4008 if (saved_thread == NULL || mythread_alive (saved_tid))
4009 current_thread = saved_thread;
4010 else
4011 {
4012 if (debug_threads)
4013 debug_printf ("Previously current thread died.\n");
4014
4015	      /* We can't change the current inferior behind GDB's back;
4016		 otherwise, a subsequent command may apply to the wrong
4017		 process.  */
4018 current_thread = NULL;
4019 }
4020 }
4021
4022 /* Returns true if THREAD is stopped in a jump pad, and we can't
4023 move it out, because we need to report the stop event to GDB. For
4024 example, if the user puts a breakpoint in the jump pad, it's
4025 because she wants to debug it. */
4026
4027 static bool
4028 stuck_in_jump_pad_callback (thread_info *thread)
4029 {
4030 struct lwp_info *lwp = get_thread_lwp (thread);
4031
4032 if (lwp->suspended != 0)
4033 {
4034 internal_error (__FILE__, __LINE__,
4035 "LWP %ld is suspended, suspended=%d\n",
4036 lwpid_of (thread), lwp->suspended);
4037 }
4038 gdb_assert (lwp->stopped);
4039
4040	  /* Allow debugging the jump pad, gdb_collect, etc.  */
4041 return (supports_fast_tracepoints ()
4042 && agent_loaded_p ()
4043 && (gdb_breakpoint_here (lwp->stop_pc)
4044 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4045 || thread->last_resume_kind == resume_step)
4046 && (linux_fast_tracepoint_collecting (lwp, NULL)
4047 != fast_tpoint_collect_result::not_collecting));
4048 }
4049
4050 static void
4051 move_out_of_jump_pad_callback (thread_info *thread)
4052 {
4053 struct thread_info *saved_thread;
4054 struct lwp_info *lwp = get_thread_lwp (thread);
4055 int *wstat;
4056
4057 if (lwp->suspended != 0)
4058 {
4059 internal_error (__FILE__, __LINE__,
4060 "LWP %ld is suspended, suspended=%d\n",
4061 lwpid_of (thread), lwp->suspended);
4062 }
4063 gdb_assert (lwp->stopped);
4064
4065 /* For gdb_breakpoint_here. */
4066 saved_thread = current_thread;
4067 current_thread = thread;
4068
4069 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4070
4071 /* Allow debugging the jump pad, gdb_collect, etc. */
4072 if (!gdb_breakpoint_here (lwp->stop_pc)
4073 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4074 && thread->last_resume_kind != resume_step
4075 && maybe_move_out_of_jump_pad (lwp, wstat))
4076 {
4077 if (debug_threads)
4078 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4079 lwpid_of (thread));
4080
4081 if (wstat)
4082 {
4083 lwp->status_pending_p = 0;
4084 enqueue_one_deferred_signal (lwp, wstat);
4085
4086 if (debug_threads)
4087 debug_printf ("Signal %d for LWP %ld deferred "
4088 "(in jump pad)\n",
4089 WSTOPSIG (*wstat), lwpid_of (thread));
4090 }
4091
4092 linux_resume_one_lwp (lwp, 0, 0, NULL);
4093 }
4094 else
4095 lwp_suspended_inc (lwp);
4096
4097 current_thread = saved_thread;
4098 }
4099
4100 static bool
4101 lwp_running (thread_info *thread)
4102 {
4103 struct lwp_info *lwp = get_thread_lwp (thread);
4104
4105 if (lwp_is_marked_dead (lwp))
4106 return false;
4107
4108 return !lwp->stopped;
4109 }
4110
4111 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4112 If SUSPEND, then also increase the suspend count of every LWP,
4113 except EXCEPT. */
4114
4115 static void
4116 stop_all_lwps (int suspend, struct lwp_info *except)
4117 {
4118 /* Should not be called recursively. */
4119 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4120
4121 if (debug_threads)
4122 {
4123 debug_enter ();
4124 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4125 suspend ? "stop-and-suspend" : "stop",
4126 except != NULL
4127 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4128 : "none");
4129 }
4130
4131 stopping_threads = (suspend
4132 ? STOPPING_AND_SUSPENDING_THREADS
4133 : STOPPING_THREADS);
4134
4135 if (suspend)
4136 for_each_thread ([&] (thread_info *thread)
4137 {
4138 suspend_and_send_sigstop (thread, except);
4139 });
4140 else
4141 for_each_thread ([&] (thread_info *thread)
4142 {
4143 send_sigstop (thread, except);
4144 });
4145
4146 wait_for_sigstop ();
4147 stopping_threads = NOT_STOPPING_THREADS;
4148
4149 if (debug_threads)
4150 {
4151 debug_printf ("stop_all_lwps done, setting stopping_threads "
4152 "back to !stopping\n");
4153 debug_exit ();
4154 }
4155 }
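
/* In outline, the stop protocol above is: set the global
   stopping_threads state, send SIGSTOP (via tkill) to every LWP not
   already stopped, then let wait_for_sigstop pull wait statuses until no
   unwaited-for children remain, leaving every event pending to be
   reported later.  Stops initiated this way are recognizable because
   send_sigstop sets the LWP's stop_expected flag first.  */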
4156
4157 /* Enqueue one signal in the chain of signals which need to be
4158 delivered to this process on next resume. */
4159
4160 static void
4161 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4162 {
4163 struct pending_signals *p_sig = XNEW (struct pending_signals);
4164
4165 p_sig->prev = lwp->pending_signals;
4166 p_sig->signal = signal;
4167 if (info == NULL)
4168 memset (&p_sig->info, 0, sizeof (siginfo_t));
4169 else
4170 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4171 lwp->pending_signals = p_sig;
4172 }
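
/* The pending_signals list above is a singly linked stack: each new
   entry's PREV points at the previous head, so the head is the most
   recently queued signal.  The consumer in linux_resume_one_lwp_throw
   walks the PREV links to the tail before dequeuing, so signals are
   delivered oldest-first (FIFO), preserving their arrival order.  */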
4173
4174 /* Install breakpoints for software single stepping. */
4175
4176 static void
4177 install_software_single_step_breakpoints (struct lwp_info *lwp)
4178 {
4179 struct thread_info *thread = get_lwp_thread (lwp);
4180 struct regcache *regcache = get_thread_regcache (thread, 1);
4181
4182 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4183
4184 current_thread = thread;
4185 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4186
4187 for (CORE_ADDR pc : next_pcs)
4188 set_single_step_breakpoint (pc, current_ptid);
4189 }
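
/* The low target's get_next_pcs hook computes every address the
   instruction at the current PC may transfer control to (fall-through,
   branch target, etc.), so for a conditional branch the vector above
   typically holds two entries, and a software single-step plants a
   breakpoint on each possible successor.  */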
4190
4191	/* Single step via hardware or software single step.
4192	   Return 1 if hardware single-stepping is used, 0 if software
4193	   single-stepping is used or single-stepping is not supported.  */
4194
4195 static int
4196 single_step (struct lwp_info* lwp)
4197 {
4198 int step = 0;
4199
4200 if (can_hardware_single_step ())
4201 {
4202 step = 1;
4203 }
4204 else if (can_software_single_step ())
4205 {
4206 install_software_single_step_breakpoints (lwp);
4207 step = 0;
4208 }
4209 else
4210 {
4211 if (debug_threads)
4212 debug_printf ("stepping is not implemented on this target");
4213 }
4214
4215 return step;
4216 }
4217
4218	/* The signal can be delivered to the inferior if we are not trying to
4219	   finish a fast tracepoint collect.  Since a signal can be delivered
4220	   during the step-over, the program may run into the signal handler
4221	   and trap again after returning from it.  We can live with the
4222	   spurious double traps.  */
4223
4224 static int
4225 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4226 {
4227 return (lwp->collecting_fast_tracepoint
4228 == fast_tpoint_collect_result::not_collecting);
4229 }
4230
4231 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4232 SIGNAL is nonzero, give it that signal. */
4233
4234 static void
4235 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4236 int step, int signal, siginfo_t *info)
4237 {
4238 struct thread_info *thread = get_lwp_thread (lwp);
4239 struct thread_info *saved_thread;
4240 int ptrace_request;
4241 struct process_info *proc = get_thread_process (thread);
4242
4243	  /* Note that the target description may not be initialised
4244	     (proc->tdesc == NULL) at this point, because the program hasn't
4245	     stopped at its first instruction yet.  That happens while GDBserver
4246	     is skipping the extra traps from the wrapper program (see option
4247	     --wrapper).  Code in this function that requires register access
4248	     should be guarded by a check on proc->tdesc.  */
4249
4250 if (lwp->stopped == 0)
4251 return;
4252
4253 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4254
4255 fast_tpoint_collect_result fast_tp_collecting
4256 = lwp->collecting_fast_tracepoint;
4257
4258 gdb_assert (!stabilizing_threads
4259 || (fast_tp_collecting
4260 != fast_tpoint_collect_result::not_collecting));
4261
4262 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4263 user used the "jump" command, or "set $pc = foo"). */
4264 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4265 {
4266 /* Collecting 'while-stepping' actions doesn't make sense
4267 anymore. */
4268 release_while_stepping_state_list (thread);
4269 }
4270
4271 /* If we have pending signals or status, and a new signal, enqueue the
4272 signal. Also enqueue the signal if it can't be delivered to the
4273 inferior right now. */
4274 if (signal != 0
4275 && (lwp->status_pending_p
4276 || lwp->pending_signals != NULL
4277 || !lwp_signal_can_be_delivered (lwp)))
4278 {
4279 enqueue_pending_signal (lwp, signal, info);
4280
4281 /* Postpone any pending signal. It was enqueued above. */
4282 signal = 0;
4283 }
4284
4285 if (lwp->status_pending_p)
4286 {
4287 if (debug_threads)
4288 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4289 " has pending status\n",
4290 lwpid_of (thread), step ? "step" : "continue",
4291 lwp->stop_expected ? "expected" : "not expected");
4292 return;
4293 }
4294
4295 saved_thread = current_thread;
4296 current_thread = thread;
4297
4298 /* This bit needs some thinking about. If we get a signal that
4299 we must report while a single-step reinsert is still pending,
4300 we often end up resuming the thread. It might be better to
4301 (ew) allow a stack of pending events; then we could be sure that
4302 the reinsert happened right away and not lose any signals.
4303
4304 Making this stack would also shrink the window in which breakpoints are
4305 uninserted (see comment in linux_wait_for_lwp) but not enough for
4306 complete correctness, so it won't solve that problem. It may be
4307 worthwhile just to solve this one, however. */
4308 if (lwp->bp_reinsert != 0)
4309 {
4310 if (debug_threads)
4311 debug_printf (" pending reinsert at 0x%s\n",
4312 paddress (lwp->bp_reinsert));
4313
4314 if (can_hardware_single_step ())
4315 {
4316 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4317 {
4318 if (step == 0)
4319 warning ("BAD - reinserting but not stepping.");
4320 if (lwp->suspended)
4321 warning ("BAD - reinserting and suspended(%d).",
4322 lwp->suspended);
4323 }
4324 }
4325
4326 step = maybe_hw_step (thread);
4327 }
4328
4329 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4330 {
4331 if (debug_threads)
4332 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4333 " (exit-jump-pad-bkpt)\n",
4334 lwpid_of (thread));
4335 }
4336 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4337 {
4338 if (debug_threads)
4339 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4340 " single-stepping\n",
4341 lwpid_of (thread));
4342
4343 if (can_hardware_single_step ())
4344 step = 1;
4345 else
4346 {
4347 internal_error (__FILE__, __LINE__,
4348 "moving out of jump pad single-stepping"
4349 " not implemented on this target");
4350 }
4351 }
4352
4353	  /* If this thread has while-stepping actions, set it stepping.
4354 If we have a signal to deliver, it may or may not be set to
4355 SIG_IGN, we don't know. Assume so, and allow collecting
4356 while-stepping into a signal handler. A possible smart thing to
4357 do would be to set an internal breakpoint at the signal return
4358 address, continue, and carry on catching this while-stepping
4359 action only when that breakpoint is hit. A future
4360 enhancement. */
4361 if (thread->while_stepping != NULL)
4362 {
4363 if (debug_threads)
4364 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4365 lwpid_of (thread));
4366
4367 step = single_step (lwp);
4368 }
4369
4370 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4371 {
4372 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4373
4374 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4375
4376 if (debug_threads)
4377 {
4378 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4379 (long) lwp->stop_pc);
4380 }
4381 }
4382
4383 /* If we have pending signals, consume one if it can be delivered to
4384 the inferior. */
4385 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4386 {
4387 struct pending_signals **p_sig;
4388
4389 p_sig = &lwp->pending_signals;
4390 while ((*p_sig)->prev != NULL)
4391 p_sig = &(*p_sig)->prev;
4392
4393 signal = (*p_sig)->signal;
4394 if ((*p_sig)->info.si_signo != 0)
4395 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4396 &(*p_sig)->info);
4397
4398 free (*p_sig);
4399 *p_sig = NULL;
4400 }
4401
4402 if (debug_threads)
4403 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4404 lwpid_of (thread), step ? "step" : "continue", signal,
4405 lwp->stop_expected ? "expected" : "not expected");
4406
4407 if (the_low_target.prepare_to_resume != NULL)
4408 the_low_target.prepare_to_resume (lwp);
4409
4410 regcache_invalidate_thread (thread);
4411 errno = 0;
4412 lwp->stepping = step;
4413 if (step)
4414 ptrace_request = PTRACE_SINGLESTEP;
4415 else if (gdb_catching_syscalls_p (lwp))
4416 ptrace_request = PTRACE_SYSCALL;
4417 else
4418 ptrace_request = PTRACE_CONT;
4419 ptrace (ptrace_request,
4420 lwpid_of (thread),
4421 (PTRACE_TYPE_ARG3) 0,
4422 /* Coerce to a uintptr_t first to avoid potential gcc warning
4423 of coercing an 8 byte integer to a 4 byte pointer. */
4424 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4425
4426 current_thread = saved_thread;
4427 if (errno)
4428 perror_with_name ("resuming thread");
4429
4430 /* Successfully resumed. Clear state that no longer makes sense,
4431 and mark the LWP as running. Must not do this before resuming
4432 otherwise if that fails other code will be confused. E.g., we'd
4433 later try to stop the LWP and hang forever waiting for a stop
4434 status. Note that we must not throw after this is cleared,
4435 otherwise handle_zombie_lwp_error would get confused. */
4436 lwp->stopped = 0;
4437 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4438 }
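
/* For reference, the resume requests selected above map to ptrace
   semantics as follows: PTRACE_SINGLESTEP executes one instruction and
   stops with SIGTRAP; PTRACE_SYSCALL continues but stops at the next
   syscall entry or exit; PTRACE_CONT simply continues.  In all three,
   the data argument is the signal to inject into the tracee, with 0
   meaning no signal:

     ptrace (PTRACE_CONT, pid, 0, SIGUSR1); // continue, deliver SIGUSR1
     ptrace (PTRACE_CONT, pid, 0, 0);       // continue, no signal
*/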
4439
4440 /* Called when we try to resume a stopped LWP and that errors out. If
4441	   the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4442	   or about to become one), discard the error, clear any pending status
4443 the LWP may have, and return true (we'll collect the exit status
4444 soon enough). Otherwise, return false. */
4445
4446 static int
4447 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4448 {
4449 struct thread_info *thread = get_lwp_thread (lp);
4450
4451 /* If we get an error after resuming the LWP successfully, we'd
4452 confuse !T state for the LWP being gone. */
4453 gdb_assert (lp->stopped);
4454
4455 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4456 because even if ptrace failed with ESRCH, the tracee may be "not
4457 yet fully dead", but already refusing ptrace requests. In that
4458 case the tracee has 'R (Running)' state for a little bit
4459 (observed in Linux 3.18). See also the note on ESRCH in the
4460 ptrace(2) man page. Instead, check whether the LWP has any state
4461 other than ptrace-stopped. */
4462
4463 /* Don't assume anything if /proc/PID/status can't be read. */
4464 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4465 {
4466 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4467 lp->status_pending_p = 0;
4468 return 1;
4469 }
4470 return 0;
4471 }
4472
4473 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4474 disappears while we try to resume it. */
4475
4476 static void
4477 linux_resume_one_lwp (struct lwp_info *lwp,
4478 int step, int signal, siginfo_t *info)
4479 {
4480 try
4481 {
4482 linux_resume_one_lwp_throw (lwp, step, signal, info);
4483 }
4484 catch (const gdb_exception_error &ex)
4485 {
4486 if (!check_ptrace_stopped_lwp_gone (lwp))
4487 throw;
4488 }
4489 }
4490
4491 /* This function is called once per thread via for_each_thread.
4492 We look up which resume request applies to THREAD and mark it with a
4493 pointer to the appropriate resume request.
4494
4495	   This algorithm is O(threads * resume elements), but the number of
4496	   resume elements is small (and will remain small at least until GDB
4497	   supports thread suspension).  */
4498
4499 static void
4500 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4501 {
4502 struct lwp_info *lwp = get_thread_lwp (thread);
4503
4504 for (int ndx = 0; ndx < n; ndx++)
4505 {
4506 ptid_t ptid = resume[ndx].thread;
4507 if (ptid == minus_one_ptid
4508 || ptid == thread->id
4509 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4510 of PID'. */
4511 || (ptid.pid () == pid_of (thread)
4512 && (ptid.is_pid ()
4513 || ptid.lwp () == -1)))
4514 {
4515 if (resume[ndx].kind == resume_stop
4516 && thread->last_resume_kind == resume_stop)
4517 {
4518 if (debug_threads)
4519 debug_printf ("already %s LWP %ld at GDB's request\n",
4520 (thread->last_status.kind
4521 == TARGET_WAITKIND_STOPPED)
4522 ? "stopped"
4523 : "stopping",
4524 lwpid_of (thread));
4525
4526 continue;
4527 }
4528
4529 /* Ignore (wildcard) resume requests for already-resumed
4530 threads. */
4531 if (resume[ndx].kind != resume_stop
4532 && thread->last_resume_kind != resume_stop)
4533 {
4534 if (debug_threads)
4535 debug_printf ("already %s LWP %ld at GDB's request\n",
4536 (thread->last_resume_kind
4537 == resume_step)
4538 ? "stepping"
4539 : "continuing",
4540 lwpid_of (thread));
4541 continue;
4542 }
4543
4544 /* Don't let wildcard resumes resume fork children that GDB
4545 does not yet know are new fork children. */
4546 if (lwp->fork_relative != NULL)
4547 {
4548 struct lwp_info *rel = lwp->fork_relative;
4549
4550 if (rel->status_pending_p
4551 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4552 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4553 {
4554 if (debug_threads)
4555 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4556 lwpid_of (thread));
4557 continue;
4558 }
4559 }
4560
4561 /* If the thread has a pending event that has already been
4562 reported to GDBserver core, but GDB has not pulled the
4563 event out of the vStopped queue yet, likewise, ignore the
4564 (wildcard) resume request. */
4565 if (in_queued_stop_replies (thread->id))
4566 {
4567 if (debug_threads)
4568 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4569 lwpid_of (thread));
4570 continue;
4571 }
4572
4573 lwp->resume = &resume[ndx];
4574 thread->last_resume_kind = lwp->resume->kind;
4575
4576 lwp->step_range_start = lwp->resume->step_range_start;
4577 lwp->step_range_end = lwp->resume->step_range_end;
4578
4579 /* If we had a deferred signal to report, dequeue one now.
4580 This can happen if LWP gets more than one signal while
4581 trying to get out of a jump pad. */
4582 if (lwp->stopped
4583 && !lwp->status_pending_p
4584 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4585 {
4586 lwp->status_pending_p = 1;
4587
4588 if (debug_threads)
4589 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4590 "leaving status pending.\n",
4591 WSTOPSIG (lwp->status_pending),
4592 lwpid_of (thread));
4593 }
4594
4595 return;
4596 }
4597 }
4598
4599 /* No resume action for this thread. */
4600 lwp->resume = NULL;
4601 }
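
/* As an example of the matching above: an RSP packet like
   "vCont;s:p1234.5;c" arrives here as two thread_resume elements,
   roughly { resume_step, ptid (0x1234, 5) } and { resume_continue,
   minus_one_ptid }.  Each thread takes the first element that matches
   it, so thread 5 of process 0x1234 single-steps while every other
   thread continues.  */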
4602
4603 /* find_thread callback for linux_resume. Return true if this lwp has an
4604 interesting status pending. */
4605
4606 static bool
4607 resume_status_pending_p (thread_info *thread)
4608 {
4609 struct lwp_info *lwp = get_thread_lwp (thread);
4610
4611 /* LWPs which will not be resumed are not interesting, because
4612 we might not wait for them next time through linux_wait. */
4613 if (lwp->resume == NULL)
4614 return false;
4615
4616 return thread_still_has_status_pending_p (thread);
4617 }
4618
4619	/* Return true if this lwp, which GDB wants running, is stopped at an
4620	   internal breakpoint that we need to step over.  This assumes that
4621	   any required STOP_PC adjustment has already been propagated to the
4622	   inferior's regcache.  */
4623
4624 static bool
4625 need_step_over_p (thread_info *thread)
4626 {
4627 struct lwp_info *lwp = get_thread_lwp (thread);
4628 struct thread_info *saved_thread;
4629 CORE_ADDR pc;
4630 struct process_info *proc = get_thread_process (thread);
4631
4632	  /* GDBserver is skipping the extra traps from the wrapper program,
4633	     so there is no need to step over.  */
4634 if (proc->tdesc == NULL)
4635 return false;
4636
4637 /* LWPs which will not be resumed are not interesting, because we
4638 might not wait for them next time through linux_wait. */
4639
4640 if (!lwp->stopped)
4641 {
4642 if (debug_threads)
4643 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4644 lwpid_of (thread));
4645 return false;
4646 }
4647
4648 if (thread->last_resume_kind == resume_stop)
4649 {
4650 if (debug_threads)
4651 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4652 " stopped\n",
4653 lwpid_of (thread));
4654 return false;
4655 }
4656
4657 gdb_assert (lwp->suspended >= 0);
4658
4659 if (lwp->suspended)
4660 {
4661 if (debug_threads)
4662 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4663 lwpid_of (thread));
4664 return false;
4665 }
4666
4667 if (lwp->status_pending_p)
4668 {
4669 if (debug_threads)
4670 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4671 " status.\n",
4672 lwpid_of (thread));
4673 return false;
4674 }
4675
4676 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4677 or we have. */
4678 pc = get_pc (lwp);
4679
4680 /* If the PC has changed since we stopped, then don't do anything,
4681 and let the breakpoint/tracepoint be hit. This happens if, for
4682 instance, GDB handled the decr_pc_after_break subtraction itself,
4683 GDB is OOL stepping this thread, or the user has issued a "jump"
4684 command, or poked thread's registers herself. */
4685 if (pc != lwp->stop_pc)
4686 {
4687 if (debug_threads)
4688 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4689 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4690 lwpid_of (thread),
4691 paddress (lwp->stop_pc), paddress (pc));
4692 return false;
4693 }
4694
4695	  /* On software single-step targets, resume the inferior with the
4696	     signal rather than stepping over.  */
4697 if (can_software_single_step ()
4698 && lwp->pending_signals != NULL
4699 && lwp_signal_can_be_delivered (lwp))
4700 {
4701 if (debug_threads)
4702 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4703 " signals.\n",
4704 lwpid_of (thread));
4705
4706 return false;
4707 }
4708
4709 saved_thread = current_thread;
4710 current_thread = thread;
4711
4712 /* We can only step over breakpoints we know about. */
4713 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4714 {
4715	      /* Don't step over a breakpoint that GDB expects to hit though.
4716		 If the condition is evaluated on the target's side and it
4717		 evaluates to false, step over this breakpoint as well.  */
4718 if (gdb_breakpoint_here (pc)
4719 && gdb_condition_true_at_breakpoint (pc)
4720 && gdb_no_commands_at_breakpoint (pc))
4721 {
4722 if (debug_threads)
4723 debug_printf ("Need step over [LWP %ld]? yes, but found"
4724 " GDB breakpoint at 0x%s; skipping step over\n",
4725 lwpid_of (thread), paddress (pc));
4726
4727 current_thread = saved_thread;
4728 return false;
4729 }
4730 else
4731 {
4732 if (debug_threads)
4733 debug_printf ("Need step over [LWP %ld]? yes, "
4734 "found breakpoint at 0x%s\n",
4735 lwpid_of (thread), paddress (pc));
4736
4737	  /* We've found an lwp that needs stepping over --- return true
4738	     so that find_thread stops looking.  */
4739 current_thread = saved_thread;
4740
4741 return true;
4742 }
4743 }
4744
4745 current_thread = saved_thread;
4746
4747 if (debug_threads)
4748 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4749 " at 0x%s\n",
4750 lwpid_of (thread), paddress (pc));
4751
4752 return false;
4753 }
4754
4755	/* Start a step-over operation on LWP.  When LWP stopped at a
4756	   breakpoint, to make progress, we need to move the breakpoint out
4757	   of the way.  If we let other threads run while we do that, they may
4758 pass by the breakpoint location and miss hitting it. To avoid
4759 that, a step-over momentarily stops all threads while LWP is
4760 single-stepped by either hardware or software while the breakpoint
4761 is temporarily uninserted from the inferior. When the single-step
4762 finishes, we reinsert the breakpoint, and let all threads that are
4763 supposed to be running, run again. */
4764
4765 static int
4766 start_step_over (struct lwp_info *lwp)
4767 {
4768 struct thread_info *thread = get_lwp_thread (lwp);
4769 struct thread_info *saved_thread;
4770 CORE_ADDR pc;
4771 int step;
4772
4773 if (debug_threads)
4774 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4775 lwpid_of (thread));
4776
4777 stop_all_lwps (1, lwp);
4778
4779 if (lwp->suspended != 0)
4780 {
4781 internal_error (__FILE__, __LINE__,
4782 "LWP %ld suspended=%d\n", lwpid_of (thread),
4783 lwp->suspended);
4784 }
4785
4786 if (debug_threads)
4787 debug_printf ("Done stopping all threads for step-over.\n");
4788
4789 /* Note, we should always reach here with an already adjusted PC,
4790 either by GDB (if we're resuming due to GDB's request), or by our
4791 caller, if we just finished handling an internal breakpoint GDB
4792 shouldn't care about. */
4793 pc = get_pc (lwp);
4794
4795 saved_thread = current_thread;
4796 current_thread = thread;
4797
4798 lwp->bp_reinsert = pc;
4799 uninsert_breakpoints_at (pc);
4800 uninsert_fast_tracepoint_jumps_at (pc);
4801
4802 step = single_step (lwp);
4803
4804 current_thread = saved_thread;
4805
4806 linux_resume_one_lwp (lwp, step, 0, NULL);
4807
4808 /* Require next event from this LWP. */
4809 step_over_bkpt = thread->id;
4810 return 1;
4811 }
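
/* To summarize, a step-over transaction is:
   1. stop_all_lwps (1, lwp): stop and suspend every other LWP;
   2. record the PC in bp_reinsert and uninsert the breakpoint and any
      fast tracepoint jump at that address;
   3. single-step LWP, by hardware step or software single-step
      breakpoints;
   4. on the next stop, finish_step_over reinserts the breakpoint and
      clears bp_reinsert, and the caller unsuspends the others.
   A non-null step_over_bkpt marks the transaction as in progress and
   makes the wait code demand the next event from this LWP.  */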
4812
4813	/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
4814	   start_step_over, if still there, and delete any single-step
4815	   breakpoints we've set, on non-hardware single-step targets.  */
4816
4817 static int
4818 finish_step_over (struct lwp_info *lwp)
4819 {
4820 if (lwp->bp_reinsert != 0)
4821 {
4822 struct thread_info *saved_thread = current_thread;
4823
4824 if (debug_threads)
4825 debug_printf ("Finished step over.\n");
4826
4827 current_thread = get_lwp_thread (lwp);
4828
4829 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4830 may be no breakpoint to reinsert there by now. */
4831 reinsert_breakpoints_at (lwp->bp_reinsert);
4832 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4833
4834 lwp->bp_reinsert = 0;
4835
4836 /* Delete any single-step breakpoints. No longer needed. We
4837 don't have to worry about other threads hitting this trap,
4838 and later not being able to explain it, because we were
4839 stepping over a breakpoint, and we hold all threads but
4840 LWP stopped while doing that. */
4841 if (!can_hardware_single_step ())
4842 {
4843 gdb_assert (has_single_step_breakpoints (current_thread));
4844 delete_single_step_breakpoints (current_thread);
4845 }
4846
4847 step_over_bkpt = null_ptid;
4848 current_thread = saved_thread;
4849 return 1;
4850 }
4851 else
4852 return 0;
4853 }
4854
4855 /* If there's a step over in progress, wait until all threads stop
4856 (that is, until the stepping thread finishes its step), and
4857 unsuspend all lwps. The stepping thread ends with its status
4858 pending, which is processed later when we get back to processing
4859 events. */
4860
4861 static void
4862 complete_ongoing_step_over (void)
4863 {
4864 if (step_over_bkpt != null_ptid)
4865 {
4866 struct lwp_info *lwp;
4867 int wstat;
4868 int ret;
4869
4870 if (debug_threads)
4871 debug_printf ("detach: step over in progress, finish it first\n");
4872
4873 /* Passing NULL_PTID as filter indicates we want all events to
4874 be left pending. Eventually this returns when there are no
4875 unwaited-for children left. */
4876 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4877 &wstat, __WALL);
4878 gdb_assert (ret == -1);
4879
4880 lwp = find_lwp_pid (step_over_bkpt);
4881 if (lwp != NULL)
4882 finish_step_over (lwp);
4883 step_over_bkpt = null_ptid;
4884 unsuspend_all_lwps (lwp);
4885 }
4886 }
4887
4888 /* This function is called once per thread. We check the thread's resume
4889 request, which will tell us whether to resume, step, or leave the thread
4890 stopped; and what signal, if any, it should be sent.
4891
4892 For threads which we aren't explicitly told otherwise, we preserve
4893 the stepping flag; this is used for stepping over gdbserver-placed
4894 breakpoints.
4895
4896 If pending_flags was set in any thread, we queue any needed
4897 signals, since we won't actually resume. We already have a pending
4898 event to report, so we don't need to preserve any step requests;
4899 they should be re-issued if necessary. */
4900
4901 static void
4902 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4903 {
4904 struct lwp_info *lwp = get_thread_lwp (thread);
4905 int leave_pending;
4906
4907 if (lwp->resume == NULL)
4908 return;
4909
4910 if (lwp->resume->kind == resume_stop)
4911 {
4912 if (debug_threads)
4913 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4914
4915 if (!lwp->stopped)
4916 {
4917 if (debug_threads)
4918 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4919
4920 /* Stop the thread, and wait for the event asynchronously,
4921 through the event loop. */
4922 send_sigstop (lwp);
4923 }
4924 else
4925 {
4926 if (debug_threads)
4927 debug_printf ("already stopped LWP %ld\n",
4928 lwpid_of (thread));
4929
4930 /* The LWP may have been stopped in an internal event that
4931 was not meant to be notified back to GDB (e.g., gdbserver
4932 breakpoint), so we should be reporting a stop event in
4933 this case too. */
4934
4935 /* If the thread already has a pending SIGSTOP, this is a
4936 no-op. Otherwise, something later will presumably resume
4937 the thread and this will cause it to cancel any pending
4938 operation, due to last_resume_kind == resume_stop. If
4939 the thread already has a pending status to report, we
4940 will still report it the next time we wait - see
4941 status_pending_p_callback. */
4942
4943 /* If we already have a pending signal to report, then
4944 there's no need to queue a SIGSTOP, as this means we're
4945 midway through moving the LWP out of the jumppad, and we
4946 will report the pending signal as soon as that is
4947 finished. */
4948 if (lwp->pending_signals_to_report == NULL)
4949 send_sigstop (lwp);
4950 }
4951
4952 /* For stop requests, we're done. */
4953 lwp->resume = NULL;
4954 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4955 return;
4956 }
4957
4958 /* If this thread which is about to be resumed has a pending status,
4959 then don't resume it - we can just report the pending status.
4960 Likewise if it is suspended, because e.g., another thread is
4961 stepping past a breakpoint. Make sure to queue any signals that
4962	     would otherwise be sent.  In all-stop mode, we make this decision
4963	     based on whether *any* thread has a pending status.  If there's a
4964 thread that needs the step-over-breakpoint dance, then don't
4965 resume any other thread but that particular one. */
4966 leave_pending = (lwp->suspended
4967 || lwp->status_pending_p
4968 || leave_all_stopped);
4969
4970 /* If we have a new signal, enqueue the signal. */
4971 if (lwp->resume->sig != 0)
4972 {
4973 siginfo_t info, *info_p;
4974
4975 /* If this is the same signal we were previously stopped by,
4976 make sure to queue its siginfo. */
4977 if (WIFSTOPPED (lwp->last_status)
4978 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4979 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4980 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4981 info_p = &info;
4982 else
4983 info_p = NULL;
4984
4985 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4986 }
4987
4988 if (!leave_pending)
4989 {
4990 if (debug_threads)
4991 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4992
4993 proceed_one_lwp (thread, NULL);
4994 }
4995 else
4996 {
4997 if (debug_threads)
4998 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4999 }
5000
5001 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5002 lwp->resume = NULL;
5003 }
5004
5005 void
5006 linux_process_target::resume (thread_resume *resume_info, size_t n)
5007 {
5008 struct thread_info *need_step_over = NULL;
5009
5010 if (debug_threads)
5011 {
5012 debug_enter ();
5013 debug_printf ("linux_resume:\n");
5014 }
5015
5016 for_each_thread ([&] (thread_info *thread)
5017 {
5018 linux_set_resume_request (thread, resume_info, n);
5019 });
5020
5021 /* If there is a thread which would otherwise be resumed, which has
5022 a pending status, then don't resume any threads - we can just
5023 report the pending status. Make sure to queue any signals that
5024 would otherwise be sent. In non-stop mode, we'll apply this
5025	     logic to each thread individually.  We consume all pending events
5026	     before considering whether to start a step-over (in all-stop).  */
5027 bool any_pending = false;
5028 if (!non_stop)
5029 any_pending = find_thread (resume_status_pending_p) != NULL;
5030
5031 /* If there is a thread which would otherwise be resumed, which is
5032 stopped at a breakpoint that needs stepping over, then don't
5033 resume any threads - have it step over the breakpoint with all
5034 other threads stopped, then resume all threads again. Make sure
5035 to queue any signals that would otherwise be delivered or
5036 queued. */
5037 if (!any_pending && supports_breakpoints ())
5038 need_step_over = find_thread (need_step_over_p);
5039
5040 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5041
5042 if (debug_threads)
5043 {
5044 if (need_step_over != NULL)
5045 debug_printf ("Not resuming all, need step over\n");
5046 else if (any_pending)
5047 debug_printf ("Not resuming, all-stop and found "
5048 "an LWP with pending status\n");
5049 else
5050 debug_printf ("Resuming, no pending status or step over needed\n");
5051 }
5052
5053 /* Even if we're leaving threads stopped, queue all signals we'd
5054 otherwise deliver. */
5055 for_each_thread ([&] (thread_info *thread)
5056 {
5057 linux_resume_one_thread (thread, leave_all_stopped);
5058 });
5059
5060 if (need_step_over)
5061 start_step_over (get_thread_lwp (need_step_over));
5062
5063 if (debug_threads)
5064 {
5065 debug_printf ("linux_resume done\n");
5066 debug_exit ();
5067 }
5068
5069 /* We may have events that were pending that can/should be sent to
5070 the client now. Trigger a linux_wait call. */
5071 if (target_is_async_p ())
5072 async_file_mark ();
5073 }
5074
5075 /* This function is called once per thread. We check the thread's
5076 last resume request, which will tell us whether to resume, step, or
5077 leave the thread stopped. Any signal the client requested to be
5078 delivered has already been enqueued at this point.
5079
5080 If any thread that GDB wants running is stopped at an internal
5081 breakpoint that needs stepping over, we start a step-over operation
5082 on that particular thread, and leave all others stopped. */
5083
5084 static void
5085 proceed_one_lwp (thread_info *thread, lwp_info *except)
5086 {
5087 struct lwp_info *lwp = get_thread_lwp (thread);
5088 int step;
5089
5090 if (lwp == except)
5091 return;
5092
5093 if (debug_threads)
5094 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5095
5096 if (!lwp->stopped)
5097 {
5098 if (debug_threads)
5099 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5100 return;
5101 }
5102
5103 if (thread->last_resume_kind == resume_stop
5104 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5105 {
5106 if (debug_threads)
5107	debug_printf ("   client wants LWP %ld to remain stopped\n",
5108 lwpid_of (thread));
5109 return;
5110 }
5111
5112 if (lwp->status_pending_p)
5113 {
5114 if (debug_threads)
5115 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5116 lwpid_of (thread));
5117 return;
5118 }
5119
5120 gdb_assert (lwp->suspended >= 0);
5121
5122 if (lwp->suspended)
5123 {
5124 if (debug_threads)
5125 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5126 return;
5127 }
5128
5129 if (thread->last_resume_kind == resume_stop
5130 && lwp->pending_signals_to_report == NULL
5131 && (lwp->collecting_fast_tracepoint
5132 == fast_tpoint_collect_result::not_collecting))
5133 {
5134	      /* We haven't reported this LWP as stopped yet (otherwise, the
5135		 last_status.kind check above would catch it, and we wouldn't
5136		 reach here).  This LWP may have been momentarily paused by a
5137		 stop_all_lwps call while handling, for example, another LWP's
5138 step-over. In that case, the pending expected SIGSTOP signal
5139 that was queued at vCont;t handling time will have already
5140 been consumed by wait_for_sigstop, and so we need to requeue
5141 another one here. Note that if the LWP already has a SIGSTOP
5142 pending, this is a no-op. */
5143
5144 if (debug_threads)
5145 debug_printf ("Client wants LWP %ld to stop. "
5146 "Making sure it has a SIGSTOP pending\n",
5147 lwpid_of (thread));
5148
5149 send_sigstop (lwp);
5150 }
5151
5152 if (thread->last_resume_kind == resume_step)
5153 {
5154 if (debug_threads)
5155 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5156 lwpid_of (thread));
5157
5158 /* If resume_step is requested by GDB, install single-step
5159 breakpoints when the thread is about to be actually resumed if
5160 the single-step breakpoints weren't removed. */
5161 if (can_software_single_step ()
5162 && !has_single_step_breakpoints (thread))
5163 install_software_single_step_breakpoints (lwp);
5164
5165 step = maybe_hw_step (thread);
5166 }
5167 else if (lwp->bp_reinsert != 0)
5168 {
5169 if (debug_threads)
5170 debug_printf (" stepping LWP %ld, reinsert set\n",
5171 lwpid_of (thread));
5172
5173 step = maybe_hw_step (thread);
5174 }
5175 else
5176 step = 0;
5177
5178 linux_resume_one_lwp (lwp, step, 0, NULL);
5179 }
5180
5181 static void
5182 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5183 {
5184 struct lwp_info *lwp = get_thread_lwp (thread);
5185
5186 if (lwp == except)
5187 return;
5188
5189 lwp_suspended_decr (lwp);
5190
5191 proceed_one_lwp (thread, except);
5192 }
5193
5194 /* When we finish a step-over, set threads running again. If there's
5195 another thread that may need a step-over, now's the time to start
5196 it. Eventually, we'll move all threads past their breakpoints. */
5197
5198 static void
5199 proceed_all_lwps (void)
5200 {
5201 struct thread_info *need_step_over;
5202
5203 /* If there is a thread which would otherwise be resumed, which is
5204 stopped at a breakpoint that needs stepping over, then don't
5205 resume any threads - have it step over the breakpoint with all
5206 other threads stopped, then resume all threads again. */
5207
5208 if (supports_breakpoints ())
5209 {
5210 need_step_over = find_thread (need_step_over_p);
5211
5212 if (need_step_over != NULL)
5213 {
5214 if (debug_threads)
5215 debug_printf ("proceed_all_lwps: found "
5216 "thread %ld needing a step-over\n",
5217 lwpid_of (need_step_over));
5218
5219 start_step_over (get_thread_lwp (need_step_over));
5220 return;
5221 }
5222 }
5223
5224 if (debug_threads)
5225 debug_printf ("Proceeding, no step-over needed\n");
5226
5227 for_each_thread ([] (thread_info *thread)
5228 {
5229 proceed_one_lwp (thread, NULL);
5230 });
5231 }
5232
5233 /* Stopped LWPs that the client wanted to be running, that don't have
5234 pending statuses, are set to run again, except for EXCEPT, if not
5235 NULL. This undoes a stop_all_lwps call. */
5236
5237 static void
5238 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5239 {
5240 if (debug_threads)
5241 {
5242 debug_enter ();
5243 if (except)
5244 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5245 lwpid_of (get_lwp_thread (except)));
5246 else
5247 debug_printf ("unstopping all lwps\n");
5248 }
5249
5250 if (unsuspend)
5251 for_each_thread ([&] (thread_info *thread)
5252 {
5253 unsuspend_and_proceed_one_lwp (thread, except);
5254 });
5255 else
5256 for_each_thread ([&] (thread_info *thread)
5257 {
5258 proceed_one_lwp (thread, except);
5259 });
5260
5261 if (debug_threads)
5262 {
5263 debug_printf ("unstop_all_lwps done\n");
5264 debug_exit ();
5265 }
5266 }
5267
5268
5269 #ifdef HAVE_LINUX_REGSETS
5270
5271 #define use_linux_regsets 1
5272
5273 /* Returns true if REGSET has been disabled. */
5274
5275 static int
5276 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5277 {
5278 return (info->disabled_regsets != NULL
5279 && info->disabled_regsets[regset - info->regsets]);
5280 }
5281
5282 /* Disable REGSET. */
5283
5284 static void
5285 disable_regset (struct regsets_info *info, struct regset_info *regset)
5286 {
5287 int dr_offset;
5288
5289 dr_offset = regset - info->regsets;
5290 if (info->disabled_regsets == NULL)
5291 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5292 info->disabled_regsets[dr_offset] = 1;
5293 }
5294
5295 static int
5296 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5297 struct regcache *regcache)
5298 {
5299 struct regset_info *regset;
5300 int saw_general_regs = 0;
5301 int pid;
5302 struct iovec iov;
5303
5304 pid = lwpid_of (current_thread);
5305 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5306 {
5307 void *buf, *data;
5308 int nt_type, res;
5309
5310 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5311 continue;
5312
5313 buf = xmalloc (regset->size);
5314
5315 nt_type = regset->nt_type;
5316 if (nt_type)
5317 {
5318 iov.iov_base = buf;
5319 iov.iov_len = regset->size;
5320 data = (void *) &iov;
5321 }
5322 else
5323 data = buf;
5324
5325 #ifndef __sparc__
5326 res = ptrace (regset->get_request, pid,
5327 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5328 #else
5329 res = ptrace (regset->get_request, pid, data, nt_type);
5330 #endif
5331 if (res < 0)
5332 {
5333 if (errno == EIO
5334 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5335 {
5336 /* If we get EIO on a regset, or an EINVAL and the regset is
5337 optional, do not try it again for this process mode. */
5338 disable_regset (regsets_info, regset);
5339 }
5340 else if (errno == ENODATA)
5341 {
5342 /* ENODATA may be returned if the regset is currently
5343 not "active". This can happen in normal operation,
5344 so suppress the warning in this case. */
5345 }
5346 else if (errno == ESRCH)
5347 {
5348 /* At this point, ESRCH should mean the process is
5349 already gone, in which case we simply ignore attempts
5350 to read its registers. */
5351 }
5352 else
5353 {
5354 char s[256];
5355 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5356 pid);
5357 perror (s);
5358 }
5359 }
5360 else
5361 {
5362 if (regset->type == GENERAL_REGS)
5363 saw_general_regs = 1;
5364 regset->store_function (regcache, buf);
5365 }
5366 free (buf);
5367 }
5368 if (saw_general_regs)
5369 return 0;
5370 else
5371 return 1;
5372 }
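
/* A note on the ptrace call above: when a regset has a non-zero NT_*
   core note type, the request is the modern PTRACE_GETREGSET interface,
   which takes the note type where the address normally goes and a
   struct iovec describing the buffer:

     struct iovec iov = { buf, size };
     ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);

   The kernel updates iov.iov_len to the amount actually written.
   Regsets with an nt_type of 0 use older fixed-purpose requests such as
   PTRACE_GETREGS, which take the buffer pointer directly.  */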
5373
5374 static int
5375 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5376 struct regcache *regcache)
5377 {
5378 struct regset_info *regset;
5379 int saw_general_regs = 0;
5380 int pid;
5381 struct iovec iov;
5382
5383 pid = lwpid_of (current_thread);
5384 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5385 {
5386 void *buf, *data;
5387 int nt_type, res;
5388
5389 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5390 || regset->fill_function == NULL)
5391 continue;
5392
5393 buf = xmalloc (regset->size);
5394
5395 /* First fill the buffer with the current register set contents,
5396 in case there are any items in the kernel's regset that are
5397 not in gdbserver's regcache. */
5398
5399 nt_type = regset->nt_type;
5400 if (nt_type)
5401 {
5402 iov.iov_base = buf;
5403 iov.iov_len = regset->size;
5404 data = (void *) &iov;
5405 }
5406 else
5407 data = buf;
5408
5409 #ifndef __sparc__
5410 res = ptrace (regset->get_request, pid,
5411 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5412 #else
5413 res = ptrace (regset->get_request, pid, data, nt_type);
5414 #endif
5415
5416 if (res == 0)
5417 {
5418 /* Then overlay our cached registers on that. */
5419 regset->fill_function (regcache, buf);
5420
5421 /* Only now do we write the register set. */
5422 #ifndef __sparc__
5423 res = ptrace (regset->set_request, pid,
5424 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5425 #else
5426 res = ptrace (regset->set_request, pid, data, nt_type);
5427 #endif
5428 }
5429
5430 if (res < 0)
5431 {
5432 if (errno == EIO
5433 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5434 {
5435 /* If we get EIO on a regset, or an EINVAL and the regset is
5436 optional, do not try it again for this process mode. */
5437 disable_regset (regsets_info, regset);
5438 }
5439 else if (errno == ESRCH)
5440 {
5441 /* At this point, ESRCH should mean the process is
5442 already gone, in which case we simply ignore attempts
5443 to change its registers. See also the related
5444 comment in linux_resume_one_lwp. */
5445 free (buf);
5446 return 0;
5447 }
5448 else
5449 {
5450 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5451 }
5452 }
5453 else if (regset->type == GENERAL_REGS)
5454 saw_general_regs = 1;
5455 free (buf);
5456 }
5457 if (saw_general_regs)
5458 return 0;
5459 else
5460 return 1;
5461 }
5462
5463 #else /* !HAVE_LINUX_REGSETS */
5464
5465 #define use_linux_regsets 0
5466 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5467 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5468
5469 #endif
5470
5471 /* Return 1 if register REGNO is supported by one of the regset ptrace
5472 calls or 0 if it has to be transferred individually. */
5473
5474 static int
5475 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5476 {
5477 unsigned char mask = 1 << (regno % 8);
5478 size_t index = regno / 8;
5479
5480 return (use_linux_regsets
5481 && (regs_info->regset_bitmap == NULL
5482 || (regs_info->regset_bitmap[index] & mask) != 0));
5483 }
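
/* Worked example for the bitmap test above: for regno 10, index is
   10 / 8 = 1 and mask is 1 << (10 % 8) = 0x04, so the test reads bit 2
   of regset_bitmap[1].  A NULL bitmap means every register is handled
   by some regset.  */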
5484
5485 #ifdef HAVE_LINUX_USRREGS
5486
5487 static int
5488 register_addr (const struct usrregs_info *usrregs, int regnum)
5489 {
5490 int addr;
5491
5492 if (regnum < 0 || regnum >= usrregs->num_regs)
5493 error ("Invalid register number %d.", regnum);
5494
5495 addr = usrregs->regmap[regnum];
5496
5497 return addr;
5498 }
5499
5500 /* Fetch one register. */
5501 static void
5502 fetch_register (const struct usrregs_info *usrregs,
5503 struct regcache *regcache, int regno)
5504 {
5505 CORE_ADDR regaddr;
5506 int i, size;
5507 char *buf;
5508 int pid;
5509
5510 if (regno >= usrregs->num_regs)
5511 return;
5512 if ((*the_low_target.cannot_fetch_register) (regno))
5513 return;
5514
5515 regaddr = register_addr (usrregs, regno);
5516 if (regaddr == -1)
5517 return;
5518
5519 size = ((register_size (regcache->tdesc, regno)
5520 + sizeof (PTRACE_XFER_TYPE) - 1)
5521 & -sizeof (PTRACE_XFER_TYPE));
5522 buf = (char *) alloca (size);
5523
5524 pid = lwpid_of (current_thread);
5525 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5526 {
5527 errno = 0;
5528 *(PTRACE_XFER_TYPE *) (buf + i) =
5529 ptrace (PTRACE_PEEKUSER, pid,
5530 /* Coerce to a uintptr_t first to avoid potential gcc warning
5531 about coercing an 8 byte integer to a 4 byte pointer. */
5532 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5533 regaddr += sizeof (PTRACE_XFER_TYPE);
5534 if (errno != 0)
5535 {
5536 /* Mark register REGNO unavailable. */
5537 supply_register (regcache, regno, NULL);
5538 return;
5539 }
5540 }
5541
5542 if (the_low_target.supply_ptrace_register)
5543 the_low_target.supply_ptrace_register (regcache, regno, buf);
5544 else
5545 supply_register (regcache, regno, buf);
5546 }
5547
5548 /* Store one register. */
5549 static void
5550 store_register (const struct usrregs_info *usrregs,
5551 struct regcache *regcache, int regno)
5552 {
5553 CORE_ADDR regaddr;
5554 int i, size;
5555 char *buf;
5556 int pid;
5557
5558 if (regno >= usrregs->num_regs)
5559 return;
5560 if ((*the_low_target.cannot_store_register) (regno))
5561 return;
5562
5563 regaddr = register_addr (usrregs, regno);
5564 if (regaddr == -1)
5565 return;
5566
5567 size = ((register_size (regcache->tdesc, regno)
5568 + sizeof (PTRACE_XFER_TYPE) - 1)
5569 & -sizeof (PTRACE_XFER_TYPE));
5570 buf = (char *) alloca (size);
5571 memset (buf, 0, size);
5572
5573 if (the_low_target.collect_ptrace_register)
5574 the_low_target.collect_ptrace_register (regcache, regno, buf);
5575 else
5576 collect_register (regcache, regno, buf);
5577
5578 pid = lwpid_of (current_thread);
5579 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5580 {
5581 errno = 0;
5582 ptrace (PTRACE_POKEUSER, pid,
5583 /* Coerce to a uintptr_t first to avoid potential gcc warning
5584 about coercing an 8 byte integer to a 4 byte pointer. */
5585 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5586 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5587 if (errno != 0)
5588 {
5589 /* At this point, ESRCH should mean the process is
5590 already gone, in which case we simply ignore attempts
5591 to change its registers. See also the related
5592 comment in linux_resume_one_lwp. */
5593 if (errno == ESRCH)
5594 return;
5595
5596 if ((*the_low_target.cannot_store_register) (regno) == 0)
5597 error ("writing register %d: %s", regno, safe_strerror (errno));
5598 }
5599 regaddr += sizeof (PTRACE_XFER_TYPE);
5600 }
5601 }
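/* A minimal standalone sketch (hypothetical helper, not part of this
   file) of the PTRACE_PEEKUSER pattern used by fetch_register and
   store_register above: the USER area is transferred one word at a
   time, and errno must be cleared before the call because -1 is also
   a valid peeked value.  */
#if 0
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
peek_user_word (pid_t pid, unsigned long usr_offset, long *value)
{
  errno = 0;
  *value = ptrace (PTRACE_PEEKUSER, pid, (void *) usr_offset, (void *) 0);
  return errno != 0 ? -1 : 0;
}
#endif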
5602
5603 /* Fetch all registers, or just one, from the child process.
5604 If REGNO is -1, do this for all registers, skipping any that are
5605 assumed to have been retrieved by regsets_fetch_inferior_registers,
5606 unless ALL is non-zero.
5607 Otherwise, REGNO specifies which register (so we can save time). */
5608 static void
5609 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5610 struct regcache *regcache, int regno, int all)
5611 {
5612 struct usrregs_info *usr = regs_info->usrregs;
5613
5614 if (regno == -1)
5615 {
5616 for (regno = 0; regno < usr->num_regs; regno++)
5617 if (all || !linux_register_in_regsets (regs_info, regno))
5618 fetch_register (usr, regcache, regno);
5619 }
5620 else
5621 fetch_register (usr, regcache, regno);
5622 }
5623
5624 /* Store our register values back into the inferior.
5625 If REGNO is -1, do this for all registers, skipping any that are
5626 assumed to have been saved by regsets_store_inferior_registers,
5627 unless ALL is non-zero.
5628 Otherwise, REGNO specifies which register (so we can save time). */
5629 static void
5630 usr_store_inferior_registers (const struct regs_info *regs_info,
5631 struct regcache *regcache, int regno, int all)
5632 {
5633 struct usrregs_info *usr = regs_info->usrregs;
5634
5635 if (regno == -1)
5636 {
5637 for (regno = 0; regno < usr->num_regs; regno++)
5638 if (all || !linux_register_in_regsets (regs_info, regno))
5639 store_register (usr, regcache, regno);
5640 }
5641 else
5642 store_register (usr, regcache, regno);
5643 }
5644
5645 #else /* !HAVE_LINUX_USRREGS */
5646
5647 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5648 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5649
5650 #endif
5651
5652
5653 void
5654 linux_process_target::fetch_registers (regcache *regcache, int regno)
5655 {
5656 int use_regsets;
5657 int all = 0;
5658 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5659
5660 if (regno == -1)
5661 {
5662 if (the_low_target.fetch_register != NULL
5663 && regs_info->usrregs != NULL)
5664 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5665 (*the_low_target.fetch_register) (regcache, regno);
5666
5667 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5668 if (regs_info->usrregs != NULL)
5669 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5670 }
5671 else
5672 {
5673 if (the_low_target.fetch_register != NULL
5674 && (*the_low_target.fetch_register) (regcache, regno))
5675 return;
5676
5677 use_regsets = linux_register_in_regsets (regs_info, regno);
5678 if (use_regsets)
5679 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5680 regcache);
5681 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5682 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5683 }
5684 }
5685
5686 void
5687 linux_process_target::store_registers (regcache *regcache, int regno)
5688 {
5689 int use_regsets;
5690 int all = 0;
5691 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5692
5693 if (regno == -1)
5694 {
5695 all = regsets_store_inferior_registers (regs_info->regsets_info,
5696 regcache);
5697 if (regs_info->usrregs != NULL)
5698 usr_store_inferior_registers (regs_info, regcache, regno, all);
5699 }
5700 else
5701 {
5702 use_regsets = linux_register_in_regsets (regs_info, regno);
5703 if (use_regsets)
5704 all = regsets_store_inferior_registers (regs_info->regsets_info,
5705 regcache);
5706 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5707 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5708 }
5709 }
5710
5711
5712 /* A wrapper for the read_memory target op. */
5713
5714 static int
5715 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5716 {
5717 return the_target->pt->read_memory (memaddr, myaddr, len);
5718 }
5719
5720 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5721 to debugger memory starting at MYADDR. */
5722
5723 int
5724 linux_process_target::read_memory (CORE_ADDR memaddr,
5725 unsigned char *myaddr, int len)
5726 {
5727 int pid = lwpid_of (current_thread);
5728 PTRACE_XFER_TYPE *buffer;
5729 CORE_ADDR addr;
5730 int count;
5731 char filename[64];
5732 int i;
5733 int ret;
5734 int fd;
5735
5736 /* Try using /proc. Don't bother for one word. */
5737 if (len >= 3 * sizeof (long))
5738 {
5739 int bytes;
5740
5741 /* We could keep this file open and cache it - possibly one per
5742 thread. That requires some juggling, but is even faster. */
5743 sprintf (filename, "/proc/%d/mem", pid);
5744 fd = open (filename, O_RDONLY | O_LARGEFILE);
5745 if (fd == -1)
5746 goto no_proc;
5747
5748 /* If pread64 is available, use it. It's faster if the kernel
5749 supports it (only one syscall), and it's 64-bit safe even on
5750 32-bit platforms (for instance, SPARC debugging a SPARC64
5751 application). */
5752 #ifdef HAVE_PREAD64
5753 bytes = pread64 (fd, myaddr, len, memaddr);
5754 #else
5755 bytes = -1;
5756 if (lseek (fd, memaddr, SEEK_SET) != -1)
5757 bytes = read (fd, myaddr, len);
5758 #endif
5759
5760 close (fd);
5761 if (bytes == len)
5762 return 0;
5763
5764 /* Some data was read, we'll try to get the rest with ptrace. */
5765 if (bytes > 0)
5766 {
5767 memaddr += bytes;
5768 myaddr += bytes;
5769 len -= bytes;
5770 }
5771 }
5772
5773 no_proc:
5774 /* Round starting address down to longword boundary. */
5775 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5776 /* Round ending address up; get number of longwords that makes. */
5777 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5778 / sizeof (PTRACE_XFER_TYPE));
5779 /* Allocate buffer of that many longwords. */
5780 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5781
5782 /* Read all the longwords */
5783 errno = 0;
5784 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5785 {
5786 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5787 about coercing an 8 byte integer to a 4 byte pointer. */
5788 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5789 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5790 (PTRACE_TYPE_ARG4) 0);
5791 if (errno)
5792 break;
5793 }
5794 ret = errno;
5795
5796 /* Copy appropriate bytes out of the buffer. */
5797 if (i > 0)
5798 {
5799 i *= sizeof (PTRACE_XFER_TYPE);
5800 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5801 memcpy (myaddr,
5802 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5803 i < len ? i : len);
5804 }
5805
5806 return ret;
5807 }
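/* A minimal standalone sketch (hypothetical helper) of the /proc fast
   path taken above: a single pread64 on /proc/PID/mem replaces one
   ptrace round trip per word and stays 64-bit safe on 32-bit hosts.
   This assumes pread64 is available (as the HAVE_PREAD64 guard above
   checks) and that the tracee is ptrace-attached and stopped.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
proc_mem_read (int pid, unsigned long long memaddr, void *buf, size_t len)
{
  char filename[64];
  ssize_t bytes;
  int fd;

  snprintf (filename, sizeof filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;
  bytes = pread64 (fd, buf, len, memaddr);
  close (fd);
  return bytes;
}
#endif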
5808
5809 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5810 memory at MEMADDR. On failure (cannot write to the inferior)
5811 returns the value of errno. Always succeeds if LEN is zero. */
5812
5813 int
5814 linux_process_target::write_memory (CORE_ADDR memaddr,
5815 const unsigned char *myaddr, int len)
5816 {
5817 int i;
5818 /* Round starting address down to longword boundary. */
5819 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5820 /* Round ending address up; get number of longwords that makes. */
5821 int count
5822 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5823 / sizeof (PTRACE_XFER_TYPE);
5824
5825 /* Allocate buffer of that many longwords. */
5826 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5827
5828 int pid = lwpid_of (current_thread);
5829
5830 if (len == 0)
5831 {
5832 /* Zero length write always succeeds. */
5833 return 0;
5834 }
5835
5836 if (debug_threads)
5837 {
5838 /* Dump up to four bytes. */
5839 char str[4 * 2 + 1];
5840 char *p = str;
5841 int dump = len < 4 ? len : 4;
5842
5843 for (i = 0; i < dump; i++)
5844 {
5845 sprintf (p, "%02x", myaddr[i]);
5846 p += 2;
5847 }
5848 *p = '\0';
5849
5850 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5851 str, (long) memaddr, pid);
5852 }
5853
5854 /* Fill start and end extra bytes of buffer with existing memory data. */
5855
5856 errno = 0;
5857 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5858 about coercing an 8 byte integer to a 4 byte pointer. */
5859 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5860 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5861 (PTRACE_TYPE_ARG4) 0);
5862 if (errno)
5863 return errno;
5864
5865 if (count > 1)
5866 {
5867 errno = 0;
5868 buffer[count - 1]
5869 = ptrace (PTRACE_PEEKTEXT, pid,
5870 /* Coerce to a uintptr_t first to avoid potential gcc warning
5871 about coercing an 8 byte integer to a 4 byte pointer. */
5872 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5873 * sizeof (PTRACE_XFER_TYPE)),
5874 (PTRACE_TYPE_ARG4) 0);
5875 if (errno)
5876 return errno;
5877 }
5878
5879 /* Copy data to be written over corresponding part of buffer. */
5880
5881 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5882 myaddr, len);
5883
5884 /* Write the entire buffer. */
5885
5886 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5887 {
5888 errno = 0;
5889 ptrace (PTRACE_POKETEXT, pid,
5890 /* Coerce to a uintptr_t first to avoid potential gcc warning
5891 about coercing an 8 byte integer to a 4 byte pointer. */
5892 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5893 (PTRACE_TYPE_ARG4) buffer[i]);
5894 if (errno)
5895 return errno;
5896 }
5897
5898 return 0;
5899 }
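/* Worked example of the read-modify-write above, assuming a 4-byte
   PTRACE_XFER_TYPE: writing LEN == 3 bytes at MEMADDR == 0x1001 rounds
   ADDR down to 0x1000 and yields COUNT == 1, so the single word at
   0x1000 is peeked, its bytes at offsets 1..3 are overwritten with the
   new data, and the whole word is poked back -- the byte at 0x1000
   itself is preserved.  */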
5900
5901 void
5902 linux_process_target::look_up_symbols ()
5903 {
5904 #ifdef USE_THREAD_DB
5905 struct process_info *proc = current_process ();
5906
5907 if (proc->priv->thread_db != NULL)
5908 return;
5909
5910 thread_db_init ();
5911 #endif
5912 }
5913
5914 void
5915 linux_process_target::request_interrupt ()
5916 {
5917 /* Send a SIGINT to the process group. This acts just like the user
5918 typed a ^C on the controlling terminal. */
5919 ::kill (-signal_pid, SIGINT);
5920 }
5921
5922 bool
5923 linux_process_target::supports_read_auxv ()
5924 {
5925 return true;
5926 }
5927
5928 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5929 to debugger memory starting at MYADDR. */
5930
5931 int
5932 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5933 unsigned int len)
5934 {
5935 char filename[PATH_MAX];
5936 int fd, n;
5937 int pid = lwpid_of (current_thread);
5938
5939 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5940
5941 fd = open (filename, O_RDONLY);
5942 if (fd < 0)
5943 return -1;
5944
5945 if (offset != (CORE_ADDR) 0
5946 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5947 n = -1;
5948 else
5949 n = read (fd, myaddr, len);
5950
5951 close (fd);
5952
5953 return n;
5954 }
5955
5956 /* These breakpoint and watchpoint related wrapper functions simply
5957 pass on the function call if the target has registered a
5958 corresponding function. */
5959
5960 bool
5961 linux_process_target::supports_z_point_type (char z_type)
5962 {
5963 return (the_low_target.supports_z_point_type != NULL
5964 && the_low_target.supports_z_point_type (z_type));
5965 }
5966
5967 int
5968 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5969 int size, raw_breakpoint *bp)
5970 {
5971 if (type == raw_bkpt_type_sw)
5972 return insert_memory_breakpoint (bp);
5973 else if (the_low_target.insert_point != NULL)
5974 return the_low_target.insert_point (type, addr, size, bp);
5975 else
5976 /* Unsupported (see target.h). */
5977 return 1;
5978 }
5979
5980 int
5981 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5982 int size, raw_breakpoint *bp)
5983 {
5984 if (type == raw_bkpt_type_sw)
5985 return remove_memory_breakpoint (bp);
5986 else if (the_low_target.remove_point != NULL)
5987 return the_low_target.remove_point (type, addr, size, bp);
5988 else
5989 /* Unsupported (see target.h). */
5990 return 1;
5991 }
5992
5993 /* Implement the stopped_by_sw_breakpoint target_ops
5994 method. */
5995
5996 bool
5997 linux_process_target::stopped_by_sw_breakpoint ()
5998 {
5999 struct lwp_info *lwp = get_thread_lwp (current_thread);
6000
6001 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6002 }
6003
6004 /* Implement the supports_stopped_by_sw_breakpoint target_ops
6005 method. */
6006
6007 bool
6008 linux_process_target::supports_stopped_by_sw_breakpoint ()
6009 {
6010 return USE_SIGTRAP_SIGINFO;
6011 }
6012
6013 /* Implement the stopped_by_hw_breakpoint target_ops
6014 method. */
6015
6016 bool
6017 linux_process_target::stopped_by_hw_breakpoint ()
6018 {
6019 struct lwp_info *lwp = get_thread_lwp (current_thread);
6020
6021 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6022 }
6023
6024 /* Implement the supports_stopped_by_hw_breakpoint target_ops
6025 method. */
6026
6027 bool
6028 linux_process_target::supports_stopped_by_hw_breakpoint ()
6029 {
6030 return USE_SIGTRAP_SIGINFO;
6031 }
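/* In both supports_* methods above, USE_SIGTRAP_SIGINFO comes from
   nat/linux-ptrace.h; it is nonzero when SIGTRAP's siginfo.si_code can
   be trusted to classify the stop (TRAP_BRKPT for software breakpoints,
   TRAP_HWBKPT for hardware breakpoints and watchpoints), which is what
   lets the stopped_by_* methods report an exact stop reason.  */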
6032
6033 /* Implement the supports_hardware_single_step target_ops method. */
6034
6035 static int
6036 linux_supports_hardware_single_step (void)
6037 {
6038 return can_hardware_single_step ();
6039 }
6040
6041 static int
6042 linux_supports_software_single_step (void)
6043 {
6044 return can_software_single_step ();
6045 }
6046
6047 static int
6048 linux_stopped_by_watchpoint (void)
6049 {
6050 struct lwp_info *lwp = get_thread_lwp (current_thread);
6051
6052 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6053 }
6054
6055 static CORE_ADDR
6056 linux_stopped_data_address (void)
6057 {
6058 struct lwp_info *lwp = get_thread_lwp (current_thread);
6059
6060 return lwp->stopped_data_address;
6061 }
6062
6063 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6064 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6065 && defined(PT_TEXT_END_ADDR)
6066
6067 /* This is only used for targets that define PT_TEXT_ADDR,
6068 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6069 target presumably has other ways of acquiring this information,
6070 such as loadmaps. */
6071
6072 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6073 to tell gdb about. */
6074
6075 static int
6076 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6077 {
6078 unsigned long text, text_end, data;
6079 int pid = lwpid_of (current_thread);
6080
6081 errno = 0;
6082
6083 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6084 (PTRACE_TYPE_ARG4) 0);
6085 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6086 (PTRACE_TYPE_ARG4) 0);
6087 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6088 (PTRACE_TYPE_ARG4) 0);
6089
6090 if (errno == 0)
6091 {
6092 /* Both text and data offsets produced at compile-time (and so
6093 used by gdb) are relative to the beginning of the program,
6094 with the data segment immediately following the text segment.
6095 However, the actual runtime layout in memory may put the data
6096 somewhere else, so when we send gdb a data base-address, we
6097 use the real data base address and subtract the compile-time
6098 data base-address from it (which is just the length of the
6099 text segment). BSS immediately follows data in both
6100 cases. */
6101 *text_p = text;
6102 *data_p = data - (text_end - text);
6103
6104 return 1;
6105 }
6106 return 0;
6107 }
6108 #endif
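/* Worked example for linux_read_offsets: if the text segment was loaded
   at 0x10000 with TEXT_END == 0x18000 and the data segment at 0x20000,
   we report *DATA_P = 0x20000 - 0x8000 = 0x18000.  GDB then adds its
   compile-time data offset -- which equals the text length, 0x8000 --
   and arrives back at the real runtime data address 0x20000.  */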
6109
6110 static int
6111 linux_qxfer_osdata (const char *annex,
6112 unsigned char *readbuf, unsigned const char *writebuf,
6113 CORE_ADDR offset, int len)
6114 {
6115 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6116 }
6117
6118 /* Convert a native/host siginfo object into/from the siginfo in the
6119 layout of the inferior's architecture. */
6120
6121 static void
6122 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6123 {
6124 int done = 0;
6125
6126 if (the_low_target.siginfo_fixup != NULL)
6127 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6128
6129 /* If there was no callback, or the callback didn't do anything,
6130 then just do a straight memcpy. */
6131 if (!done)
6132 {
6133 if (direction == 1)
6134 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6135 else
6136 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6137 }
6138 }
6139
6140 static int
6141 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6142 unsigned const char *writebuf, CORE_ADDR offset, int len)
6143 {
6144 int pid;
6145 siginfo_t siginfo;
6146 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6147
6148 if (current_thread == NULL)
6149 return -1;
6150
6151 pid = lwpid_of (current_thread);
6152
6153 if (debug_threads)
6154 debug_printf ("%s siginfo for lwp %d.\n",
6155 readbuf != NULL ? "Reading" : "Writing",
6156 pid);
6157
6158 if (offset >= sizeof (siginfo))
6159 return -1;
6160
6161 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6162 return -1;
6163
6164 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6165 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6166 inferior with a 64-bit GDBSERVER should look the same as debugging it
6167 with a 32-bit GDBSERVER, we need to convert it. */
6168 siginfo_fixup (&siginfo, inf_siginfo, 0);
6169
6170 if (offset + len > sizeof (siginfo))
6171 len = sizeof (siginfo) - offset;
6172
6173 if (readbuf != NULL)
6174 memcpy (readbuf, inf_siginfo + offset, len);
6175 else
6176 {
6177 memcpy (inf_siginfo + offset, writebuf, len);
6178
6179 /* Convert back to ptrace layout before flushing it out. */
6180 siginfo_fixup (&siginfo, inf_siginfo, 1);
6181
6182 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6183 return -1;
6184 }
6185
6186 return len;
6187 }
6188
6189 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6190 it lets us notice when children change state, and it acts as the
6191 handler for the sigsuspend in my_waitpid. */
6192
6193 static void
6194 sigchld_handler (int signo)
6195 {
6196 int old_errno = errno;
6197
6198 if (debug_threads)
6199 {
6200 do
6201 {
6202 /* Use the async signal safe debug function. */
6203 if (debug_write ("sigchld_handler\n",
6204 sizeof ("sigchld_handler\n") - 1) < 0)
6205 break; /* just ignore */
6206 } while (0);
6207 }
6208
6209 if (target_is_async_p ())
6210 async_file_mark (); /* trigger a linux_wait */
6211
6212 errno = old_errno;
6213 }
6214
6215 static int
6216 linux_supports_non_stop (void)
6217 {
6218 return 1;
6219 }
6220
6221 static int
6222 linux_async (int enable)
6223 {
6224 int previous = target_is_async_p ();
6225
6226 if (debug_threads)
6227 debug_printf ("linux_async (%d), previous=%d\n",
6228 enable, previous);
6229
6230 if (previous != enable)
6231 {
6232 sigset_t mask;
6233 sigemptyset (&mask);
6234 sigaddset (&mask, SIGCHLD);
6235
6236 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6237
6238 if (enable)
6239 {
6240 if (pipe (linux_event_pipe) == -1)
6241 {
6242 linux_event_pipe[0] = -1;
6243 linux_event_pipe[1] = -1;
6244 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6245
6246 warning ("creating event pipe failed.");
6247 return previous;
6248 }
6249
6250 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6251 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6252
6253 /* Register the event loop handler. */
6254 add_file_handler (linux_event_pipe[0],
6255 handle_target_event, NULL);
6256
6257 /* Always trigger a linux_wait. */
6258 async_file_mark ();
6259 }
6260 else
6261 {
6262 delete_file_handler (linux_event_pipe[0]);
6263
6264 close (linux_event_pipe[0]);
6265 close (linux_event_pipe[1]);
6266 linux_event_pipe[0] = -1;
6267 linux_event_pipe[1] = -1;
6268 }
6269
6270 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6271 }
6272
6273 return previous;
6274 }
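/* A minimal sketch (hypothetical, heavily simplified) of the self-pipe
   trick that linux_async sets up above: the SIGCHLD handler writes one
   byte into a non-blocking pipe (async_file_mark), and the event loop
   watches the read end, turning an asynchronous signal into an ordinary
   file-descriptor event.  */
#if 0
#include <unistd.h>

static int event_pipe_sketch[2] = { -1, -1 };

/* Callable from a signal handler: only uses write, which is
   async-signal-safe.  */
static void
mark_event_sketch (void)
{
  char c = '+';

  /* If the pipe is full, an event is already pending; ignore failure.  */
  if (write (event_pipe_sketch[1], &c, 1) < 0)
    ;
}
#endif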
6275
6276 static int
6277 linux_start_non_stop (int nonstop)
6278 {
6279 /* Register or unregister from event-loop accordingly. */
6280 linux_async (nonstop);
6281
6282 if (target_is_async_p () != (nonstop != 0))
6283 return -1;
6284
6285 return 0;
6286 }
6287
6288 static int
6289 linux_supports_multi_process (void)
6290 {
6291 return 1;
6292 }
6293
6294 /* Check if fork events are supported. */
6295
6296 static int
6297 linux_supports_fork_events (void)
6298 {
6299 return linux_supports_tracefork ();
6300 }
6301
6302 /* Check if vfork events are supported. */
6303
6304 static int
6305 linux_supports_vfork_events (void)
6306 {
6307 return linux_supports_tracefork ();
6308 }
6309
6310 /* Check if exec events are supported. */
6311
6312 static int
6313 linux_supports_exec_events (void)
6314 {
6315 return linux_supports_traceexec ();
6316 }
6317
6318 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6319 ptrace flags for all inferiors. This is in case the new GDB connection
6320 doesn't support the same set of events that the previous one did. */
6321
6322 static void
6323 linux_handle_new_gdb_connection (void)
6324 {
6325 /* Request that all the lwps reset their ptrace options. */
6326 for_each_thread ([] (thread_info *thread)
6327 {
6328 struct lwp_info *lwp = get_thread_lwp (thread);
6329
6330 if (!lwp->stopped)
6331 {
6332 /* Stop the lwp so we can modify its ptrace options. */
6333 lwp->must_set_ptrace_flags = 1;
6334 linux_stop_lwp (lwp);
6335 }
6336 else
6337 {
6338 /* Already stopped; go ahead and set the ptrace options. */
6339 struct process_info *proc = find_process_pid (pid_of (thread));
6340 int options = linux_low_ptrace_options (proc->attached);
6341
6342 linux_enable_event_reporting (lwpid_of (thread), options);
6343 lwp->must_set_ptrace_flags = 0;
6344 }
6345 });
6346 }
6347
6348 static int
6349 linux_supports_disable_randomization (void)
6350 {
6351 #ifdef HAVE_PERSONALITY
6352 return 1;
6353 #else
6354 return 0;
6355 #endif
6356 }
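/* A minimal sketch (hypothetical; the real work happens in
   nat/linux-personality.c at inferior-creation time) of how ASLR is
   disabled when the above returns 1: query the current persona, then
   set it again with ADDR_NO_RANDOMIZE or'ed in before exec'ing the
   inferior.  */
#if 0
#include <sys/personality.h>

static void
disable_aslr_sketch (void)
{
  int persona = personality (0xffffffff);	/* Query only.  */

  if (persona >= 0)
    personality ((unsigned long) persona | ADDR_NO_RANDOMIZE);
}
#endif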
6357
6358 static int
6359 linux_supports_agent (void)
6360 {
6361 return 1;
6362 }
6363
6364 static int
6365 linux_supports_range_stepping (void)
6366 {
6367 if (can_software_single_step ())
6368 return 1;
6369 if (*the_low_target.supports_range_stepping == NULL)
6370 return 0;
6371
6372 return (*the_low_target.supports_range_stepping) ();
6373 }
6374
6375 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6376 struct target_loadseg
6377 {
6378 /* Core address to which the segment is mapped. */
6379 Elf32_Addr addr;
6380 /* VMA recorded in the program header. */
6381 Elf32_Addr p_vaddr;
6382 /* Size of this segment in memory. */
6383 Elf32_Word p_memsz;
6384 };
6385
6386 # if defined PT_GETDSBT
6387 struct target_loadmap
6388 {
6389 /* Protocol version number, must be zero. */
6390 Elf32_Word version;
6391 /* Pointer to the DSBT table, its size, and the DSBT index. */
6392 unsigned *dsbt_table;
6393 unsigned dsbt_size, dsbt_index;
6394 /* Number of segments in this map. */
6395 Elf32_Word nsegs;
6396 /* The actual memory map. */
6397 struct target_loadseg segs[/*nsegs*/];
6398 };
6399 # define LINUX_LOADMAP PT_GETDSBT
6400 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6401 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6402 # else
6403 struct target_loadmap
6404 {
6405 /* Protocol version number, must be zero. */
6406 Elf32_Half version;
6407 /* Number of segments in this map. */
6408 Elf32_Half nsegs;
6409 /* The actual memory map. */
6410 struct target_loadseg segs[/*nsegs*/];
6411 };
6412 # define LINUX_LOADMAP PTRACE_GETFDPIC
6413 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6414 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6415 # endif
6416
6417 static int
6418 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6419 unsigned char *myaddr, unsigned int len)
6420 {
6421 int pid = lwpid_of (current_thread);
6422 int addr = -1;
6423 struct target_loadmap *data = NULL;
6424 unsigned int actual_length, copy_length;
6425
6426 if (strcmp (annex, "exec") == 0)
6427 addr = (int) LINUX_LOADMAP_EXEC;
6428 else if (strcmp (annex, "interp") == 0)
6429 addr = (int) LINUX_LOADMAP_INTERP;
6430 else
6431 return -1;
6432
6433 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6434 return -1;
6435
6436 if (data == NULL)
6437 return -1;
6438
6439 actual_length = sizeof (struct target_loadmap)
6440 + sizeof (struct target_loadseg) * data->nsegs;
6441
6442 if (offset < 0 || offset > actual_length)
6443 return -1;
6444
6445 copy_length = actual_length - offset < len ? actual_length - offset : len;
6446 memcpy (myaddr, (char *) data + offset, copy_length);
6447 return copy_length;
6448 }
6449 #else
6450 # define linux_read_loadmap NULL
6451 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6452
6453 static void
6454 linux_process_qsupported (char **features, int count)
6455 {
6456 if (the_low_target.process_qsupported != NULL)
6457 the_low_target.process_qsupported (features, count);
6458 }
6459
6460 static int
6461 linux_supports_catch_syscall (void)
6462 {
6463 return (the_low_target.get_syscall_trapinfo != NULL
6464 && linux_supports_tracesysgood ());
6465 }
6466
6467 static int
6468 linux_get_ipa_tdesc_idx (void)
6469 {
6470 if (the_low_target.get_ipa_tdesc_idx == NULL)
6471 return 0;
6472
6473 return (*the_low_target.get_ipa_tdesc_idx) ();
6474 }
6475
6476 static int
6477 linux_supports_tracepoints (void)
6478 {
6479 if (*the_low_target.supports_tracepoints == NULL)
6480 return 0;
6481
6482 return (*the_low_target.supports_tracepoints) ();
6483 }
6484
6485 static CORE_ADDR
6486 linux_read_pc (struct regcache *regcache)
6487 {
6488 if (the_low_target.get_pc == NULL)
6489 return 0;
6490
6491 return (*the_low_target.get_pc) (regcache);
6492 }
6493
6494 static void
6495 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6496 {
6497 gdb_assert (the_low_target.set_pc != NULL);
6498
6499 (*the_low_target.set_pc) (regcache, pc);
6500 }
6501
6502 static int
6503 linux_thread_stopped (struct thread_info *thread)
6504 {
6505 return get_thread_lwp (thread)->stopped;
6506 }
6507
6508 /* This exposes stop-all-threads functionality to other modules. */
6509
6510 static void
6511 linux_pause_all (int freeze)
6512 {
6513 stop_all_lwps (freeze, NULL);
6514 }
6515
6516 /* This exposes unstop-all-threads functionality to other gdbserver
6517 modules. */
6518
6519 static void
6520 linux_unpause_all (int unfreeze)
6521 {
6522 unstop_all_lwps (unfreeze, NULL);
6523 }
6524
6525 int
6526 linux_process_target::prepare_to_access_memory ()
6527 {
6528 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6529 running LWP. */
6530 if (non_stop)
6531 linux_pause_all (1);
6532 return 0;
6533 }
6534
6535 void
6536 linux_process_target::done_accessing_memory ()
6537 {
6538 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6539 running LWP. */
6540 if (non_stop)
6541 linux_unpause_all (1);
6542 }
6543
6544 static int
6545 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6546 CORE_ADDR collector,
6547 CORE_ADDR lockaddr,
6548 ULONGEST orig_size,
6549 CORE_ADDR *jump_entry,
6550 CORE_ADDR *trampoline,
6551 ULONGEST *trampoline_size,
6552 unsigned char *jjump_pad_insn,
6553 ULONGEST *jjump_pad_insn_size,
6554 CORE_ADDR *adjusted_insn_addr,
6555 CORE_ADDR *adjusted_insn_addr_end,
6556 char *err)
6557 {
6558 return (*the_low_target.install_fast_tracepoint_jump_pad)
6559 (tpoint, tpaddr, collector, lockaddr, orig_size,
6560 jump_entry, trampoline, trampoline_size,
6561 jjump_pad_insn, jjump_pad_insn_size,
6562 adjusted_insn_addr, adjusted_insn_addr_end,
6563 err);
6564 }
6565
6566 static struct emit_ops *
6567 linux_emit_ops (void)
6568 {
6569 if (the_low_target.emit_ops != NULL)
6570 return (*the_low_target.emit_ops) ();
6571 else
6572 return NULL;
6573 }
6574
6575 static int
6576 linux_get_min_fast_tracepoint_insn_len (void)
6577 {
6578 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6579 }
6580
6581 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6582
6583 static int
6584 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6585 CORE_ADDR *phdr_memaddr, int *num_phdr)
6586 {
6587 char filename[PATH_MAX];
6588 int fd;
6589 const int auxv_size = is_elf64
6590 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6591 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6592
6593 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6594
6595 fd = open (filename, O_RDONLY);
6596 if (fd < 0)
6597 return 1;
6598
6599 *phdr_memaddr = 0;
6600 *num_phdr = 0;
6601 while (read (fd, buf, auxv_size) == auxv_size
6602 && (*phdr_memaddr == 0 || *num_phdr == 0))
6603 {
6604 if (is_elf64)
6605 {
6606 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6607
6608 switch (aux->a_type)
6609 {
6610 case AT_PHDR:
6611 *phdr_memaddr = aux->a_un.a_val;
6612 break;
6613 case AT_PHNUM:
6614 *num_phdr = aux->a_un.a_val;
6615 break;
6616 }
6617 }
6618 else
6619 {
6620 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6621
6622 switch (aux->a_type)
6623 {
6624 case AT_PHDR:
6625 *phdr_memaddr = aux->a_un.a_val;
6626 break;
6627 case AT_PHNUM:
6628 *num_phdr = aux->a_un.a_val;
6629 break;
6630 }
6631 }
6632 }
6633
6634 close (fd);
6635
6636 if (*phdr_memaddr == 0 || *num_phdr == 0)
6637 {
6638 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6639 "phdr_memaddr = %ld, phdr_num = %d",
6640 (long) *phdr_memaddr, *num_phdr);
6641 return 2;
6642 }
6643
6644 return 0;
6645 }
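/* For reference: /proc/PID/auxv is a flat array of Elf32_auxv_t or
   Elf64_auxv_t entries, each an (a_type, a_un.a_val) pair, terminated
   by an AT_NULL entry.  The loop above stops as soon as both AT_PHDR
   and AT_PHNUM have been seen, or at end of file.  */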
6646
6647 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6648
6649 static CORE_ADDR
6650 get_dynamic (const int pid, const int is_elf64)
6651 {
6652 CORE_ADDR phdr_memaddr, relocation;
6653 int num_phdr, i;
6654 unsigned char *phdr_buf;
6655 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6656
6657 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6658 return 0;
6659
6660 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6661 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6662
6663 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6664 return 0;
6665
6666 /* Compute relocation: it is expected to be 0 for "regular" executables,
6667 non-zero for PIE ones. */
6668 relocation = -1;
6669 for (i = 0; relocation == -1 && i < num_phdr; i++)
6670 if (is_elf64)
6671 {
6672 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6673
6674 if (p->p_type == PT_PHDR)
6675 relocation = phdr_memaddr - p->p_vaddr;
6676 }
6677 else
6678 {
6679 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6680
6681 if (p->p_type == PT_PHDR)
6682 relocation = phdr_memaddr - p->p_vaddr;
6683 }
6684
6685 if (relocation == -1)
6686 {
6687 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6688 all real-world executables, including PIE executables, always have
6689 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6690 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6691 provides DT_DEBUG anyway (fpc binaries are statically linked).
6692
6693 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6694
6695 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6696
6697 return 0;
6698 }
6699
6700 for (i = 0; i < num_phdr; i++)
6701 {
6702 if (is_elf64)
6703 {
6704 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6705
6706 if (p->p_type == PT_DYNAMIC)
6707 return p->p_vaddr + relocation;
6708 }
6709 else
6710 {
6711 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6712
6713 if (p->p_type == PT_DYNAMIC)
6714 return p->p_vaddr + relocation;
6715 }
6716 }
6717
6718 return 0;
6719 }
6720
6721 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6722 can be 0 if the inferior does not yet have the library list initialized.
6723 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6724 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6725
6726 static CORE_ADDR
6727 get_r_debug (const int pid, const int is_elf64)
6728 {
6729 CORE_ADDR dynamic_memaddr;
6730 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6731 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6732 CORE_ADDR map = -1;
6733
6734 dynamic_memaddr = get_dynamic (pid, is_elf64);
6735 if (dynamic_memaddr == 0)
6736 return map;
6737
6738 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6739 {
6740 if (is_elf64)
6741 {
6742 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6743 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6744 union
6745 {
6746 Elf64_Xword map;
6747 unsigned char buf[sizeof (Elf64_Xword)];
6748 }
6749 rld_map;
6750 #endif
6751 #ifdef DT_MIPS_RLD_MAP
6752 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6753 {
6754 if (linux_read_memory (dyn->d_un.d_val,
6755 rld_map.buf, sizeof (rld_map.buf)) == 0)
6756 return rld_map.map;
6757 else
6758 break;
6759 }
6760 #endif /* DT_MIPS_RLD_MAP */
6761 #ifdef DT_MIPS_RLD_MAP_REL
6762 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6763 {
6764 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6765 rld_map.buf, sizeof (rld_map.buf)) == 0)
6766 return rld_map.map;
6767 else
6768 break;
6769 }
6770 #endif /* DT_MIPS_RLD_MAP_REL */
6771
6772 if (dyn->d_tag == DT_DEBUG && map == -1)
6773 map = dyn->d_un.d_val;
6774
6775 if (dyn->d_tag == DT_NULL)
6776 break;
6777 }
6778 else
6779 {
6780 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6781 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6782 union
6783 {
6784 Elf32_Word map;
6785 unsigned char buf[sizeof (Elf32_Word)];
6786 }
6787 rld_map;
6788 #endif
6789 #ifdef DT_MIPS_RLD_MAP
6790 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6791 {
6792 if (linux_read_memory (dyn->d_un.d_val,
6793 rld_map.buf, sizeof (rld_map.buf)) == 0)
6794 return rld_map.map;
6795 else
6796 break;
6797 }
6798 #endif /* DT_MIPS_RLD_MAP */
6799 #ifdef DT_MIPS_RLD_MAP_REL
6800 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6801 {
6802 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6803 rld_map.buf, sizeof (rld_map.buf)) == 0)
6804 return rld_map.map;
6805 else
6806 break;
6807 }
6808 #endif /* DT_MIPS_RLD_MAP_REL */
6809
6810 if (dyn->d_tag == DT_DEBUG && map == -1)
6811 map = dyn->d_un.d_val;
6812
6813 if (dyn->d_tag == DT_NULL)
6814 break;
6815 }
6816
6817 dynamic_memaddr += dyn_size;
6818 }
6819
6820 return map;
6821 }
6822
6823 /* Read one pointer from MEMADDR in the inferior. */
6824
6825 static int
6826 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6827 {
6828 int ret;
6829
6830 /* Go through a union so this works on either big or little endian
6831 hosts, when the inferior's pointer size is smaller than the size
6832 of CORE_ADDR. It is assumed that the inferior's endianness is the
6833 same as the superior's. */
6834 union
6835 {
6836 CORE_ADDR core_addr;
6837 unsigned int ui;
6838 unsigned char uc;
6839 } addr;
6840
6841 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6842 if (ret == 0)
6843 {
6844 if (ptr_size == sizeof (CORE_ADDR))
6845 *ptr = addr.core_addr;
6846 else if (ptr_size == sizeof (unsigned int))
6847 *ptr = addr.ui;
6848 else
6849 gdb_assert_not_reached ("unhandled pointer size");
6850 }
6851 return ret;
6852 }
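/* Worked example of why the union matters: on a big-endian 64-bit host
   reading a 4-byte inferior pointer, the 4 bytes land at the start of
   the union -- which is the most-significant half of ADDR.CORE_ADDR.
   Reading them back through ADDR.UI instead reinterprets exactly those
   4 bytes as a 32-bit value, giving the correct result on both big-
   and little-endian hosts.  */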
6853
6854 struct link_map_offsets
6855 {
6856 /* Offset and size of r_debug.r_version. */
6857 int r_version_offset;
6858
6859 /* Offset and size of r_debug.r_map. */
6860 int r_map_offset;
6861
6862 /* Offset to l_addr field in struct link_map. */
6863 int l_addr_offset;
6864
6865 /* Offset to l_name field in struct link_map. */
6866 int l_name_offset;
6867
6868 /* Offset to l_ld field in struct link_map. */
6869 int l_ld_offset;
6870
6871 /* Offset to l_next field in struct link_map. */
6872 int l_next_offset;
6873
6874 /* Offset to l_prev field in struct link_map. */
6875 int l_prev_offset;
6876 };
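/* For reference, a simplified sketch (after glibc's <link.h>; field
   types approximated) of the structures those offsets index into:  */
#if 0
struct r_debug_sketch
{
  int r_version;		  /* Protocol version, expected to be 1.  */
  struct link_map_sketch *r_map;  /* Head of the loaded-object chain.  */
};

struct link_map_sketch
{
  unsigned long l_addr;		/* Load bias of the object.  */
  char *l_name;			/* Absolute file name.  */
  void *l_ld;			/* The object's dynamic section.  */
  struct link_map_sketch *l_next, *l_prev;  /* Doubly-linked chain.  */
};
#endif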
6877
6878 /* Construct qXfer:libraries-svr4:read reply. */
6879
6880 static int
6881 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6882 unsigned const char *writebuf,
6883 CORE_ADDR offset, int len)
6884 {
6885 struct process_info_private *const priv = current_process ()->priv;
6886 char filename[PATH_MAX];
6887 int pid, is_elf64;
6888
6889 static const struct link_map_offsets lmo_32bit_offsets =
6890 {
6891 0, /* r_version offset. */
6892 4, /* r_debug.r_map offset. */
6893 0, /* l_addr offset in link_map. */
6894 4, /* l_name offset in link_map. */
6895 8, /* l_ld offset in link_map. */
6896 12, /* l_next offset in link_map. */
6897 16 /* l_prev offset in link_map. */
6898 };
6899
6900 static const struct link_map_offsets lmo_64bit_offsets =
6901 {
6902 0, /* r_version offset. */
6903 8, /* r_debug.r_map offset. */
6904 0, /* l_addr offset in link_map. */
6905 8, /* l_name offset in link_map. */
6906 16, /* l_ld offset in link_map. */
6907 24, /* l_next offset in link_map. */
6908 32 /* l_prev offset in link_map. */
6909 };
6910 const struct link_map_offsets *lmo;
6911 unsigned int machine;
6912 int ptr_size;
6913 CORE_ADDR lm_addr = 0, lm_prev = 0;
6914 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6915 int header_done = 0;
6916
6917 if (writebuf != NULL)
6918 return -2;
6919 if (readbuf == NULL)
6920 return -1;
6921
6922 pid = lwpid_of (current_thread);
6923 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6924 is_elf64 = elf_64_file_p (filename, &machine);
6925 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6926 ptr_size = is_elf64 ? 8 : 4;
6927
6928 while (annex[0] != '\0')
6929 {
6930 const char *sep;
6931 CORE_ADDR *addrp;
6932 int name_len;
6933
6934 sep = strchr (annex, '=');
6935 if (sep == NULL)
6936 break;
6937
6938 name_len = sep - annex;
6939 if (name_len == 5 && startswith (annex, "start"))
6940 addrp = &lm_addr;
6941 else if (name_len == 4 && startswith (annex, "prev"))
6942 addrp = &lm_prev;
6943 else
6944 {
6945 annex = strchr (sep, ';');
6946 if (annex == NULL)
6947 break;
6948 annex++;
6949 continue;
6950 }
6951
6952 annex = decode_address_to_semicolon (addrp, sep + 1);
6953 }
6954
6955 if (lm_addr == 0)
6956 {
6957 int r_version = 0;
6958
6959 if (priv->r_debug == 0)
6960 priv->r_debug = get_r_debug (pid, is_elf64);
6961
6962 /* We failed to find DT_DEBUG. This situation will not change
6963 for this inferior - do not retry it. Report it to GDB as
6964 E01; see solib-svr4.c on the GDB side for the reasons. */
6965 if (priv->r_debug == (CORE_ADDR) -1)
6966 return -1;
6967
6968 if (priv->r_debug != 0)
6969 {
6970 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6971 (unsigned char *) &r_version,
6972 sizeof (r_version)) != 0
6973 || r_version != 1)
6974 {
6975 warning ("unexpected r_debug version %d", r_version);
6976 }
6977 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6978 &lm_addr, ptr_size) != 0)
6979 {
6980 warning ("unable to read r_map from 0x%lx",
6981 (long) priv->r_debug + lmo->r_map_offset);
6982 }
6983 }
6984 }
6985
6986 std::string document = "<library-list-svr4 version=\"1.0\"";
6987
6988 while (lm_addr
6989 && read_one_ptr (lm_addr + lmo->l_name_offset,
6990 &l_name, ptr_size) == 0
6991 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6992 &l_addr, ptr_size) == 0
6993 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6994 &l_ld, ptr_size) == 0
6995 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6996 &l_prev, ptr_size) == 0
6997 && read_one_ptr (lm_addr + lmo->l_next_offset,
6998 &l_next, ptr_size) == 0)
6999 {
7000 unsigned char libname[PATH_MAX];
7001
7002 if (lm_prev != l_prev)
7003 {
7004 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7005 (long) lm_prev, (long) l_prev);
7006 break;
7007 }
7008
7009 /* Ignore the first entry even if it has a valid name, as the first
7010 entry corresponds to the main executable. The first entry should not
7011 be skipped if the dynamic loader was loaded late by a static executable
7012 (see solib-svr4.c parameter ignore_first). But in that case the main
7013 executable does not have PT_DYNAMIC present, and this function has
7014 already exited above due to a failed get_r_debug. */
7015 if (lm_prev == 0)
7016 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7017 else
7018 {
7019 /* Not checking for error because reading may stop before
7020 we've got PATH_MAX worth of characters. */
7021 libname[0] = '\0';
7022 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7023 libname[sizeof (libname) - 1] = '\0';
7024 if (libname[0] != '\0')
7025 {
7026 if (!header_done)
7027 {
7028 /* Terminate `<library-list-svr4'. */
7029 document += '>';
7030 header_done = 1;
7031 }
7032
7033 string_appendf (document, "<library name=\"");
7034 xml_escape_text_append (&document, (char *) libname);
7035 string_appendf (document, "\" lm=\"0x%lx\" "
7036 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7037 (unsigned long) lm_addr, (unsigned long) l_addr,
7038 (unsigned long) l_ld);
7039 }
7040 }
7041
7042 lm_prev = lm_addr;
7043 lm_addr = l_next;
7044 }
7045
7046 if (!header_done)
7047 {
7048 /* Empty list; terminate `<library-list-svr4'. */
7049 document += "/>";
7050 }
7051 else
7052 document += "</library-list-svr4>";
7053
7054 int document_len = document.length ();
7055 if (offset < document_len)
7056 document_len -= offset;
7057 else
7058 document_len = 0;
7059 if (len > document_len)
7060 len = document_len;
7061
7062 memcpy (readbuf, document.data () + offset, len);
7063
7064 return len;
7065 }
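/* For illustration, a reply built by the loop above looks like this
   (addresses are made up; GDB parses the document in solib-svr4.c):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
	      l_addr="0x7ffff7800000" l_ld="0x7ffff7bb8000"/>
   </library-list-svr4>  */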
7066
7067 #ifdef HAVE_LINUX_BTRACE
7068
7069 /* See to_disable_btrace target method. */
7070
7071 static int
7072 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7073 {
7074 enum btrace_error err;
7075
7076 err = linux_disable_btrace (tinfo);
7077 return (err == BTRACE_ERR_NONE ? 0 : -1);
7078 }
7079
7080 /* Encode an Intel Processor Trace configuration. */
7081
7082 static void
7083 linux_low_encode_pt_config (struct buffer *buffer,
7084 const struct btrace_data_pt_config *config)
7085 {
7086 buffer_grow_str (buffer, "<pt-config>\n");
7087
7088 switch (config->cpu.vendor)
7089 {
7090 case CV_INTEL:
7091 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7092 "model=\"%u\" stepping=\"%u\"/>\n",
7093 config->cpu.family, config->cpu.model,
7094 config->cpu.stepping);
7095 break;
7096
7097 default:
7098 break;
7099 }
7100
7101 buffer_grow_str (buffer, "</pt-config>\n");
7102 }
7103
7104 /* Encode a raw buffer. */
7105
7106 static void
7107 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7108 unsigned int size)
7109 {
7110 if (size == 0)
7111 return;
7112
7113 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7114 buffer_grow_str (buffer, "<raw>\n");
7115
7116 while (size-- > 0)
7117 {
7118 char elem[2];
7119
7120 elem[0] = tohex ((*data >> 4) & 0xf);
7121 elem[1] = tohex (*data++ & 0xf);
7122
7123 buffer_grow (buffer, elem, 2);
7124 }
7125
7126 buffer_grow_str (buffer, "</raw>\n");
7127 }
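/* Worked example: the byte 0xa5 is emitted as the two characters "a5" --
   tohex ((0xa5 >> 4) & 0xf) yields 'a' and tohex (0xa5 & 0xf) yields
   '5' -- so the <raw> element doubles the trace buffer's size.  */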
7128
7129 /* See to_read_btrace target method. */
7130
7131 static int
7132 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7133 enum btrace_read_type type)
7134 {
7135 struct btrace_data btrace;
7136 enum btrace_error err;
7137
7138 err = linux_read_btrace (&btrace, tinfo, type);
7139 if (err != BTRACE_ERR_NONE)
7140 {
7141 if (err == BTRACE_ERR_OVERFLOW)
7142 buffer_grow_str0 (buffer, "E.Overflow.");
7143 else
7144 buffer_grow_str0 (buffer, "E.Generic Error.");
7145
7146 return -1;
7147 }
7148
7149 switch (btrace.format)
7150 {
7151 case BTRACE_FORMAT_NONE:
7152 buffer_grow_str0 (buffer, "E.No Trace.");
7153 return -1;
7154
7155 case BTRACE_FORMAT_BTS:
7156 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7157 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7158
7159 for (const btrace_block &block : *btrace.variant.bts.blocks)
7160 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7161 paddress (block.begin), paddress (block.end));
7162
7163 buffer_grow_str0 (buffer, "</btrace>\n");
7164 break;
7165
7166 case BTRACE_FORMAT_PT:
7167 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7168 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7169 buffer_grow_str (buffer, "<pt>\n");
7170
7171 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7172
7173 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7174 btrace.variant.pt.size);
7175
7176 buffer_grow_str (buffer, "</pt>\n");
7177 buffer_grow_str0 (buffer, "</btrace>\n");
7178 break;
7179
7180 default:
7181 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7182 return -1;
7183 }
7184
7185 return 0;
7186 }
7187
7188 /* See to_btrace_conf target method. */
7189
7190 static int
7191 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7192 struct buffer *buffer)
7193 {
7194 const struct btrace_config *conf;
7195
7196 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7197 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7198
7199 conf = linux_btrace_conf (tinfo);
7200 if (conf != NULL)
7201 {
7202 switch (conf->format)
7203 {
7204 case BTRACE_FORMAT_NONE:
7205 break;
7206
7207 case BTRACE_FORMAT_BTS:
7208 buffer_xml_printf (buffer, "<bts");
7209 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7210 buffer_xml_printf (buffer, " />\n");
7211 break;
7212
7213 case BTRACE_FORMAT_PT:
7214 buffer_xml_printf (buffer, "<pt");
7215 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7216 buffer_xml_printf (buffer, "/>\n");
7217 break;
7218 }
7219 }
7220
7221 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7222 return 0;
7223 }
7224 #endif /* HAVE_LINUX_BTRACE */
7225
7226 /* See nat/linux-nat.h. */
7227
7228 ptid_t
7229 current_lwp_ptid (void)
7230 {
7231 return ptid_of (current_thread);
7232 }
7233
7234 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7235
7236 static int
7237 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7238 {
7239 if (the_low_target.breakpoint_kind_from_pc != NULL)
7240 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7241 else
7242 return default_breakpoint_kind_from_pc (pcptr);
7243 }
7244
7245 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7246
7247 static const gdb_byte *
7248 linux_sw_breakpoint_from_kind (int kind, int *size)
7249 {
7250 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7251
7252 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7253 }
7254
7255 /* Implementation of the target_ops method
7256 "breakpoint_kind_from_current_state". */
7257
7258 static int
7259 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7260 {
7261 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7262 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7263 else
7264 return linux_breakpoint_kind_from_pc (pcptr);
7265 }
7266
7267 /* Default implementation of linux_target_ops method "set_pc" for
7268 32-bit pc register which is literally named "pc". */
7269
7270 void
7271 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7272 {
7273 uint32_t newpc = pc;
7274
7275 supply_register_by_name (regcache, "pc", &newpc);
7276 }
7277
7278 /* Default implementation of linux_target_ops method "get_pc" for
7279 32-bit pc register which is literally named "pc". */
7280
7281 CORE_ADDR
7282 linux_get_pc_32bit (struct regcache *regcache)
7283 {
7284 uint32_t pc;
7285
7286 collect_register_by_name (regcache, "pc", &pc);
7287 if (debug_threads)
7288 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7289 return pc;
7290 }
7291
7292 /* Default implementation of linux_target_ops method "set_pc" for
7293 64-bit pc register which is literally named "pc". */
7294
7295 void
7296 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7297 {
7298 uint64_t newpc = pc;
7299
7300 supply_register_by_name (regcache, "pc", &newpc);
7301 }
7302
7303 /* Default implementation of linux_target_ops method "get_pc" for
7304 64-bit pc register which is literally named "pc". */
7305
7306 CORE_ADDR
7307 linux_get_pc_64bit (struct regcache *regcache)
7308 {
7309 uint64_t pc;
7310
7311 collect_register_by_name (regcache, "pc", &pc);
7312 if (debug_threads)
7313 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7314 return pc;
7315 }
7316
7317 /* See linux-low.h. */
7318
7319 int
7320 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7321 {
7322 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7323 int offset = 0;
7324
7325 gdb_assert (wordsize == 4 || wordsize == 8);
7326
7327 while (the_target->pt->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7328 {
7329 if (wordsize == 4)
7330 {
7331 uint32_t *data_p = (uint32_t *) data;
7332 if (data_p[0] == match)
7333 {
7334 *valp = data_p[1];
7335 return 1;
7336 }
7337 }
7338 else
7339 {
7340 uint64_t *data_p = (uint64_t *) data;
7341 if (data_p[0] == match)
7342 {
7343 *valp = data_p[1];
7344 return 1;
7345 }
7346 }
7347
7348 offset += 2 * wordsize;
7349 }
7350
7351 return 0;
7352 }
7353
7354 /* See linux-low.h. */
7355
7356 CORE_ADDR
7357 linux_get_hwcap (int wordsize)
7358 {
7359 CORE_ADDR hwcap = 0;
7360 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7361 return hwcap;
7362 }
7363
7364 /* See linux-low.h. */
7365
7366 CORE_ADDR
7367 linux_get_hwcap2 (int wordsize)
7368 {
7369 CORE_ADDR hwcap2 = 0;
7370 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7371 return hwcap2;
7372 }
7373
7374 /* The linux target ops object. */
7375
7376 static linux_process_target the_linux_target;
7377
7378 static process_stratum_target linux_target_ops = {
7379 linux_supports_hardware_single_step,
7380 linux_stopped_by_watchpoint,
7381 linux_stopped_data_address,
7382 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7383 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7384 && defined(PT_TEXT_END_ADDR)
7385 linux_read_offsets,
7386 #else
7387 NULL,
7388 #endif
7389 #ifdef USE_THREAD_DB
7390 thread_db_get_tls_address,
7391 #else
7392 NULL,
7393 #endif
7394 hostio_last_error_from_errno,
7395 linux_qxfer_osdata,
7396 linux_xfer_siginfo,
7397 linux_supports_non_stop,
7398 linux_async,
7399 linux_start_non_stop,
7400 linux_supports_multi_process,
7401 linux_supports_fork_events,
7402 linux_supports_vfork_events,
7403 linux_supports_exec_events,
7404 linux_handle_new_gdb_connection,
7405 #ifdef USE_THREAD_DB
7406 thread_db_handle_monitor_command,
7407 #else
7408 NULL,
7409 #endif
7410 linux_common_core_of_thread,
7411 linux_read_loadmap,
7412 linux_process_qsupported,
7413 linux_supports_tracepoints,
7414 linux_read_pc,
7415 linux_write_pc,
7416 linux_thread_stopped,
7417 NULL,
7418 linux_pause_all,
7419 linux_unpause_all,
7420 linux_stabilize_threads,
7421 linux_install_fast_tracepoint_jump_pad,
7422 linux_emit_ops,
7423 linux_supports_disable_randomization,
7424 linux_get_min_fast_tracepoint_insn_len,
7425 linux_qxfer_libraries_svr4,
7426 linux_supports_agent,
7427 #ifdef HAVE_LINUX_BTRACE
7428 linux_enable_btrace,
7429 linux_low_disable_btrace,
7430 linux_low_read_btrace,
7431 linux_low_btrace_conf,
7432 #else
7433 NULL,
7434 NULL,
7435 NULL,
7436 NULL,
7437 #endif
7438 linux_supports_range_stepping,
7439 linux_proc_pid_to_exec_file,
7440 linux_mntns_open_cloexec,
7441 linux_mntns_unlink,
7442 linux_mntns_readlink,
7443 linux_breakpoint_kind_from_pc,
7444 linux_sw_breakpoint_from_kind,
7445 linux_proc_tid_get_name,
7446 linux_breakpoint_kind_from_current_state,
7447 linux_supports_software_single_step,
7448 linux_supports_catch_syscall,
7449 linux_get_ipa_tdesc_idx,
7450 #if USE_THREAD_DB
7451 thread_db_thread_handle,
7452 #else
7453 NULL,
7454 #endif
7455 &the_linux_target,
7456 };
7457
7458 #ifdef HAVE_LINUX_REGSETS
7459 void
7460 initialize_regsets_info (struct regsets_info *info)
7461 {
7462 for (info->num_regsets = 0;
7463 info->regsets[info->num_regsets].size >= 0;
7464 info->num_regsets++)
7465 ;
7466 }
7467 #endif
7468
7469 void
7470 initialize_low (void)
7471 {
7472 struct sigaction sigchld_action;
7473
7474 memset (&sigchld_action, 0, sizeof (sigchld_action));
7475 set_target_ops (&linux_target_ops);
7476
7477 linux_ptrace_init_warnings ();
7478 linux_proc_init_warnings ();
7479
7480 sigchld_action.sa_handler = sigchld_handler;
7481 sigemptyset (&sigchld_action.sa_mask);
7482 sigchld_action.sa_flags = SA_RESTART;
7483 sigaction (SIGCHLD, &sigchld_action, NULL);
7484
7485 initialize_low_arch ();
7486
7487 linux_check_ptrace_features ();
7488 }