Use linux_get_auxv to get AT_PHDR in the PPC stub
gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2019 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "common/agent.h"
23 #include "tdesc.h"
24 #include "common/rsp-low.h"
25 #include "common/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "common/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "common/filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #include "common/common-inferior.h"
51 #include "nat/fork-inferior.h"
52 #include "common/environ.h"
53 #include "common/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifndef SPUFS_MAGIC
64 #define SPUFS_MAGIC 0x23c9b64e
65 #endif
66
67 #ifdef HAVE_PERSONALITY
68 # include <sys/personality.h>
69 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
70 # define ADDR_NO_RANDOMIZE 0x0040000
71 # endif
72 #endif
73
74 #ifndef O_LARGEFILE
75 #define O_LARGEFILE 0
76 #endif
77
78 #ifndef AT_HWCAP2
79 #define AT_HWCAP2 26
80 #endif
81
82 /* Some targets did not define these ptrace constants from the start,
83 so gdbserver defines them locally here. In the future, these may
84 be removed after they are added to asm/ptrace.h. */
85 #if !(defined(PT_TEXT_ADDR) \
86 || defined(PT_DATA_ADDR) \
87 || defined(PT_TEXT_END_ADDR))
88 #if defined(__mcoldfire__)
89 /* These are still undefined in 3.10 kernels. */
90 #define PT_TEXT_ADDR 49*4
91 #define PT_DATA_ADDR 50*4
92 #define PT_TEXT_END_ADDR 51*4
93 /* BFIN already defines these since at least 2.6.32 kernels. */
94 #elif defined(BFIN)
95 #define PT_TEXT_ADDR 220
96 #define PT_TEXT_END_ADDR 224
97 #define PT_DATA_ADDR 228
98 /* These are still undefined in 3.10 kernels. */
99 #elif defined(__TMS320C6X__)
100 #define PT_TEXT_ADDR (0x10000*4)
101 #define PT_DATA_ADDR (0x10004*4)
102 #define PT_TEXT_END_ADDR (0x10008*4)
103 #endif
104 #endif
105
106 #ifdef HAVE_LINUX_BTRACE
107 # include "nat/linux-btrace.h"
108 # include "common/btrace-common.h"
109 #endif
110
111 #ifndef HAVE_ELF32_AUXV_T
112 /* Copied from glibc's elf.h. */
113 typedef struct
114 {
115 uint32_t a_type; /* Entry type */
116 union
117 {
118 uint32_t a_val; /* Integer value */
119 /* We used to have pointer elements added here. We cannot do that,
120 though, since it does not work when using 32-bit definitions
121 on 64-bit platforms and vice versa. */
122 } a_un;
123 } Elf32_auxv_t;
124 #endif
125
126 #ifndef HAVE_ELF64_AUXV_T
127 /* Copied from glibc's elf.h. */
128 typedef struct
129 {
130 uint64_t a_type; /* Entry type */
131 union
132 {
133 uint64_t a_val; /* Integer value */
134 /* We used to have pointer elements added here. We cannot do that,
135 though, since it does not work when using 32-bit definitions
136 on 64-bit platforms and vice versa. */
137 } a_un;
138 } Elf64_auxv_t;
139 #endif
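
/* Illustrative sketch, not part of gdbserver: given a raw auxv buffer
   read from the inferior (e.g. via linux_get_auxv), an AT_PHDR lookup
   walks fixed-size (a_type, a_val) pairs until AT_NULL.  Assuming
   64-bit entries:

     static uint64_t
     example_auxv_at_phdr_64 (const unsigned char *buf, size_t len)
     {
       const Elf64_auxv_t *entry = (const Elf64_auxv_t *) buf;
       const Elf64_auxv_t *end = (const Elf64_auxv_t *) (buf + len);

       for (; entry < end && entry->a_type != AT_NULL; entry++)
         if (entry->a_type == AT_PHDR)
           return entry->a_un.a_val;
       return 0;
     }

   Real callers must pick Elf32_auxv_t or Elf64_auxv_t based on the
   inferior's word size, not the host's, which is why the structs
   above deliberately avoid pointer-sized members.  */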
140
141 /* Does the current host support PTRACE_GETREGSET? */
142 int have_ptrace_getregset = -1;
143
144 /* LWP accessors. */
145
146 /* See nat/linux-nat.h. */
147
148 ptid_t
149 ptid_of_lwp (struct lwp_info *lwp)
150 {
151 return ptid_of (get_lwp_thread (lwp));
152 }
153
154 /* See nat/linux-nat.h. */
155
156 void
157 lwp_set_arch_private_info (struct lwp_info *lwp,
158 struct arch_lwp_info *info)
159 {
160 lwp->arch_private = info;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 struct arch_lwp_info *
166 lwp_arch_private_info (struct lwp_info *lwp)
167 {
168 return lwp->arch_private;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 int
174 lwp_is_stopped (struct lwp_info *lwp)
175 {
176 return lwp->stopped;
177 }
178
179 /* See nat/linux-nat.h. */
180
181 enum target_stop_reason
182 lwp_stop_reason (struct lwp_info *lwp)
183 {
184 return lwp->stop_reason;
185 }
186
187 /* See nat/linux-nat.h. */
188
189 int
190 lwp_is_stepping (struct lwp_info *lwp)
191 {
192 return lwp->stepping;
193 }
194
195 /* A list of all unknown processes which receive stop signals. Some
196 other process will presumably claim each of these as forked
197 children momentarily. */
198
199 struct simple_pid_list
200 {
201 /* The process ID. */
202 int pid;
203
204 /* The status as reported by waitpid. */
205 int status;
206
207 /* Next in chain. */
208 struct simple_pid_list *next;
209 };
210 struct simple_pid_list *stopped_pids;
211
212 /* Trivial list manipulation functions to keep track of a list of new
213 stopped processes. */
214
215 static void
216 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
217 {
218 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
219
220 new_pid->pid = pid;
221 new_pid->status = status;
222 new_pid->next = *listp;
223 *listp = new_pid;
224 }
225
226 static int
227 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
228 {
229 struct simple_pid_list **p;
230
231 for (p = listp; *p != NULL; p = &(*p)->next)
232 if ((*p)->pid == pid)
233 {
234 struct simple_pid_list *next = (*p)->next;
235
236 *statusp = (*p)->status;
237 xfree (*p);
238 *p = next;
239 return 1;
240 }
241 return 0;
242 }
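
/* Usage sketch (hypothetical call sites): a stop collected for a
   not-yet-known pid is recorded, and handle_extended_wait later
   claims it when the parent's fork/clone event arrives:

     add_to_pid_list (&stopped_pids, pid, status);
     ...
     int status;

     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... the child already stopped; STATUS holds its wait status ...

   pull_pid_from_list unlinks and frees the matching entry, so each
   recorded stop is consumed at most once.  */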
243
244 enum stopping_threads_kind
245 {
246 /* Not stopping threads presently. */
247 NOT_STOPPING_THREADS,
248
249 /* Stopping threads. */
250 STOPPING_THREADS,
251
252 /* Stopping and suspending threads. */
253 STOPPING_AND_SUSPENDING_THREADS
254 };
255
256 /* This is set while stop_all_lwps is in effect. */
257 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
258
259 /* FIXME make into a target method? */
260 int using_threads = 1;
261
262 /* True if we're presently stabilizing threads (moving them out of
263 jump pads). */
264 static int stabilizing_threads;
265
266 static void linux_resume_one_lwp (struct lwp_info *lwp,
267 int step, int signal, siginfo_t *info);
268 static void linux_resume (struct thread_resume *resume_info, size_t n);
269 static void stop_all_lwps (int suspend, struct lwp_info *except);
270 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
271 static void unsuspend_all_lwps (struct lwp_info *except);
272 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
273 int *wstat, int options);
274 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
275 static struct lwp_info *add_lwp (ptid_t ptid);
276 static void linux_mourn (struct process_info *process);
277 static int linux_stopped_by_watchpoint (void);
278 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
279 static int lwp_is_marked_dead (struct lwp_info *lwp);
280 static void proceed_all_lwps (void);
281 static int finish_step_over (struct lwp_info *lwp);
282 static int kill_lwp (unsigned long lwpid, int signo);
283 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
284 static void complete_ongoing_step_over (void);
285 static int linux_low_ptrace_options (int attached);
286 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
287 static void proceed_one_lwp (thread_info *thread, lwp_info *except);
288
289 /* When the event-loop is doing a step-over, this points at the thread
290 being stepped. */
291 ptid_t step_over_bkpt;
292
293 /* True if the low target can hardware single-step. */
294
295 static int
296 can_hardware_single_step (void)
297 {
298 if (the_low_target.supports_hardware_single_step != NULL)
299 return the_low_target.supports_hardware_single_step ();
300 else
301 return 0;
302 }
303
304 /* True if the low target can software single-step. Such targets
305 implement the GET_NEXT_PCS callback. */
306
307 static int
308 can_software_single_step (void)
309 {
310 return (the_low_target.get_next_pcs != NULL);
311 }
312
313 /* True if the low target supports memory breakpoints. If so, we'll
314 have a GET_PC implementation. */
315
316 static int
317 supports_breakpoints (void)
318 {
319 return (the_low_target.get_pc != NULL);
320 }
321
322 /* Returns true if this target can support fast tracepoints. This
323 does not mean that the in-process agent has been loaded in the
324 inferior. */
325
326 static int
327 supports_fast_tracepoints (void)
328 {
329 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
330 }
331
332 /* True if LWP is stopped in its stepping range. */
333
334 static int
335 lwp_in_step_range (struct lwp_info *lwp)
336 {
337 CORE_ADDR pc = lwp->stop_pc;
338
339 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
340 }
341
342 struct pending_signals
343 {
344 int signal;
345 siginfo_t info;
346 struct pending_signals *prev;
347 };
348
349 /* The read/write ends of the pipe registered as waitable file in the
350 event loop. */
351 static int linux_event_pipe[2] = { -1, -1 };
352
353 /* True if we're currently in async mode. */
354 #define target_is_async_p() (linux_event_pipe[0] != -1)
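
/* A minimal sketch of the self-pipe pattern assumed here (the real
   helper, async_file_mark, is declared below and defined elsewhere in
   this file): whoever produces an event writes a byte to wake up the
   event loop, which drains the pipe once the event is consumed.
   Errors from write are deliberately ignored.

     static void
     example_async_mark (void)
     {
       if (target_is_async_p ())
         {
           char c = '+';

           write (linux_event_pipe[1], &c, 1);
         }
     }

   The read end, linux_event_pipe[0], is what gets registered as the
   waitable file descriptor with the event loop.  */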
355
356 static void send_sigstop (struct lwp_info *lwp);
357 static void wait_for_sigstop (void);
358
359 /* Return 1 if HEADER is a 64-bit ELF file, 0 if 32-bit, -1 if not ELF. */
360
361 static int
362 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
363 {
364 if (header->e_ident[EI_MAG0] == ELFMAG0
365 && header->e_ident[EI_MAG1] == ELFMAG1
366 && header->e_ident[EI_MAG2] == ELFMAG2
367 && header->e_ident[EI_MAG3] == ELFMAG3)
368 {
369 *machine = header->e_machine;
370 return header->e_ident[EI_CLASS] == ELFCLASS64;
371
372 }
373 *machine = EM_NONE;
374 return -1;
375 }
376
377 /* Return 1 if FILE is a 64-bit ELF file,
378 zero if the file is not a 64-bit ELF file,
379 and -1 if the file is not accessible or doesn't exist. */
380
381 static int
382 elf_64_file_p (const char *file, unsigned int *machine)
383 {
384 Elf64_Ehdr header;
385 int fd;
386
387 fd = open (file, O_RDONLY);
388 if (fd < 0)
389 return -1;
390
391 if (read (fd, &header, sizeof (header)) != sizeof (header))
392 {
393 close (fd);
394 return 0;
395 }
396 close (fd);
397
398 return elf_64_header_p (&header, machine);
399 }
400
401 /* Accepts an integer PID; returns true if the executable that PID
402 is running is a 64-bit ELF file. */
403
404 int
405 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
406 {
407 char file[PATH_MAX];
408
409 sprintf (file, "/proc/%d/exe", pid);
410 return elf_64_file_p (file, machine);
411 }
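
/* Hypothetical caller sketch: deciding whether the inferior needs
   64-bit layouts.  Note the tristate result (1, 0, or -1 on error)
   inherited from elf_64_file_p:

     unsigned int machine;
     int is_64 = linux_pid_exe_is_elf_64_file (pid, &machine);

     if (is_64 < 0)
       ... /proc/PID/exe was unreadable; fall back to a default ...
     else if (is_64)
       ... 64-bit executable; MACHINE holds e_machine, e.g. EM_X86_64 ...
     else
       ... 32-bit executable ...  */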
412
413 static void
414 delete_lwp (struct lwp_info *lwp)
415 {
416 struct thread_info *thr = get_lwp_thread (lwp);
417
418 if (debug_threads)
419 debug_printf ("deleting %ld\n", lwpid_of (thr));
420
421 remove_thread (thr);
422
423 if (the_low_target.delete_thread != NULL)
424 the_low_target.delete_thread (lwp->arch_private);
425 else
426 gdb_assert (lwp->arch_private == NULL);
427
428 free (lwp);
429 }
430
431 /* Add a process to the common process list, and set its private
432 data. */
433
434 static struct process_info *
435 linux_add_process (int pid, int attached)
436 {
437 struct process_info *proc;
438
439 proc = add_process (pid, attached);
440 proc->priv = XCNEW (struct process_info_private);
441
442 if (the_low_target.new_process != NULL)
443 proc->priv->arch_private = the_low_target.new_process ();
444
445 return proc;
446 }
447
448 static CORE_ADDR get_pc (struct lwp_info *lwp);
449
450 /* Call the target arch_setup function on the current thread. */
451
452 static void
453 linux_arch_setup (void)
454 {
455 the_low_target.arch_setup ();
456 }
457
458 /* Call the target arch_setup function on THREAD. */
459
460 static void
461 linux_arch_setup_thread (struct thread_info *thread)
462 {
463 struct thread_info *saved_thread;
464
465 saved_thread = current_thread;
466 current_thread = thread;
467
468 linux_arch_setup ();
469
470 current_thread = saved_thread;
471 }
472
473 /* Handle a GNU/Linux extended wait response. If we see a clone,
474 fork, or vfork event, we need to add the new LWP to our list
475 (and return 0 so as not to report the trap to higher layers).
476 If we see an exec event, we will modify ORIG_EVENT_LWP to point
477 to a new LWP representing the new program. */
478
479 static int
480 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
481 {
482 client_state &cs = get_client_state ();
483 struct lwp_info *event_lwp = *orig_event_lwp;
484 int event = linux_ptrace_get_extended_event (wstat);
485 struct thread_info *event_thr = get_lwp_thread (event_lwp);
486 struct lwp_info *new_lwp;
487
488 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
489
490 /* All extended events we currently use are mid-syscall. Only
491 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
492 you have to be using PTRACE_SEIZE to get that. */
493 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
494
495 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
496 || (event == PTRACE_EVENT_CLONE))
497 {
498 ptid_t ptid;
499 unsigned long new_pid;
500 int ret, status;
501
502 /* Get the pid of the new lwp. */
503 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
504 &new_pid);
505
506 /* If we haven't already seen the new PID stop, wait for it now. */
507 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
508 {
509 /* The new child has a pending SIGSTOP. We can't affect it until it
510 hits the SIGSTOP, but we're already attached. */
511
512 ret = my_waitpid (new_pid, &status, __WALL);
513
514 if (ret == -1)
515 perror_with_name ("waiting for new child");
516 else if (ret != new_pid)
517 warning ("wait returned unexpected PID %d", ret);
518 else if (!WIFSTOPPED (status))
519 warning ("wait returned unexpected status 0x%x", status);
520 }
521
522 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
523 {
524 struct process_info *parent_proc;
525 struct process_info *child_proc;
526 struct lwp_info *child_lwp;
527 struct thread_info *child_thr;
528 struct target_desc *tdesc;
529
530 ptid = ptid_t (new_pid, new_pid, 0);
531
532 if (debug_threads)
533 {
534 debug_printf ("HEW: Got fork event from LWP %ld, "
535 "new child is %d\n",
536 ptid_of (event_thr).lwp (),
537 ptid.pid ());
538 }
539
540 /* Add the new process to the tables and clone the breakpoint
541 lists of the parent. We need to do this even if the new process
542 will be detached, since we will need the process object and the
543 breakpoints to remove any breakpoints from memory when we
544 detach, and the client side will access registers. */
545 child_proc = linux_add_process (new_pid, 0);
546 gdb_assert (child_proc != NULL);
547 child_lwp = add_lwp (ptid);
548 gdb_assert (child_lwp != NULL);
549 child_lwp->stopped = 1;
550 child_lwp->must_set_ptrace_flags = 1;
551 child_lwp->status_pending_p = 0;
552 child_thr = get_lwp_thread (child_lwp);
553 child_thr->last_resume_kind = resume_stop;
554 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
555
556 /* If we're suspending all threads, leave this one suspended
557 too. If the fork/clone parent is stepping over a breakpoint,
558 all other threads have been suspended already. Leave the
559 child suspended too. */
560 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
561 || event_lwp->bp_reinsert != 0)
562 {
563 if (debug_threads)
564 debug_printf ("HEW: leaving child suspended\n");
565 child_lwp->suspended = 1;
566 }
567
568 parent_proc = get_thread_process (event_thr);
569 child_proc->attached = parent_proc->attached;
570
571 if (event_lwp->bp_reinsert != 0
572 && can_software_single_step ()
573 && event == PTRACE_EVENT_VFORK)
574 {
575 /* If we leave single-step breakpoints there, child will
576 hit it, so uninsert single-step breakpoints from parent
577 (and child). Once vfork child is done, reinsert
578 them back to parent. */
579 uninsert_single_step_breakpoints (event_thr);
580 }
581
582 clone_all_breakpoints (child_thr, event_thr);
583
584 tdesc = allocate_target_description ();
585 copy_target_description (tdesc, parent_proc->tdesc);
586 child_proc->tdesc = tdesc;
587
588 /* Clone arch-specific process data. */
589 if (the_low_target.new_fork != NULL)
590 the_low_target.new_fork (parent_proc, child_proc);
591
592 /* Save fork info in the parent thread. */
593 if (event == PTRACE_EVENT_FORK)
594 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
595 else if (event == PTRACE_EVENT_VFORK)
596 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
597
598 event_lwp->waitstatus.value.related_pid = ptid;
599
600 /* The status_pending field contains bits denoting the
601 extended event, so when the pending event is handled,
602 the handler will look at lwp->waitstatus. */
603 event_lwp->status_pending_p = 1;
604 event_lwp->status_pending = wstat;
605
606 /* Link the threads until the parent event is passed on to
607 higher layers. */
608 event_lwp->fork_relative = child_lwp;
609 child_lwp->fork_relative = event_lwp;
610
611 /* If the parent thread is doing step-over with single-step
612 breakpoints, the list of single-step breakpoints is cloned
613 from the parent's. Remove them from the child process.
614 In case of vfork, we'll reinsert them back once vforked
615 child is done. */
616 if (event_lwp->bp_reinsert != 0
617 && can_software_single_step ())
618 {
619 /* The child process is forked and stopped, so it is safe
620 to access its memory without stopping all other threads
621 from other processes. */
622 delete_single_step_breakpoints (child_thr);
623
624 gdb_assert (has_single_step_breakpoints (event_thr));
625 gdb_assert (!has_single_step_breakpoints (child_thr));
626 }
627
628 /* Report the event. */
629 return 0;
630 }
631
632 if (debug_threads)
633 debug_printf ("HEW: Got clone event "
634 "from LWP %ld, new child is LWP %ld\n",
635 lwpid_of (event_thr), new_pid);
636
637 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
638 new_lwp = add_lwp (ptid);
639
640 /* Either we're going to immediately resume the new thread
641 or leave it stopped. linux_resume_one_lwp is a nop if it
642 thinks the thread is currently running, so set this first
643 before calling linux_resume_one_lwp. */
644 new_lwp->stopped = 1;
645
646 /* If we're suspending all threads, leave this one suspended
647 too. If the fork/clone parent is stepping over a breakpoint,
648 all other threads have been suspended already. Leave the
649 child suspended too. */
650 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
651 || event_lwp->bp_reinsert != 0)
652 new_lwp->suspended = 1;
653
654 /* Normally we will get the pending SIGSTOP. But in some cases
655 we might get another signal delivered to the group first.
656 If we do get another signal, be sure not to lose it. */
657 if (WSTOPSIG (status) != SIGSTOP)
658 {
659 new_lwp->stop_expected = 1;
660 new_lwp->status_pending_p = 1;
661 new_lwp->status_pending = status;
662 }
663 else if (cs.report_thread_events)
664 {
665 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
666 new_lwp->status_pending_p = 1;
667 new_lwp->status_pending = status;
668 }
669
670 #ifdef USE_THREAD_DB
671 thread_db_notice_clone (event_thr, ptid);
672 #endif
673
674 /* Don't report the event. */
675 return 1;
676 }
677 else if (event == PTRACE_EVENT_VFORK_DONE)
678 {
679 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
680
681 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
682 {
683 reinsert_single_step_breakpoints (event_thr);
684
685 gdb_assert (has_single_step_breakpoints (event_thr));
686 }
687
688 /* Report the event. */
689 return 0;
690 }
691 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
692 {
693 struct process_info *proc;
694 std::vector<int> syscalls_to_catch;
695 ptid_t event_ptid;
696 pid_t event_pid;
697
698 if (debug_threads)
699 {
700 debug_printf ("HEW: Got exec event from LWP %ld\n",
701 lwpid_of (event_thr));
702 }
703
704 /* Get the event ptid. */
705 event_ptid = ptid_of (event_thr);
706 event_pid = event_ptid.pid ();
707
708 /* Save the syscall list from the execing process. */
709 proc = get_thread_process (event_thr);
710 syscalls_to_catch = std::move (proc->syscalls_to_catch);
711
712 /* Delete the execing process and all its threads. */
713 linux_mourn (proc);
714 current_thread = NULL;
715
716 /* Create a new process/lwp/thread. */
717 proc = linux_add_process (event_pid, 0);
718 event_lwp = add_lwp (event_ptid);
719 event_thr = get_lwp_thread (event_lwp);
720 gdb_assert (current_thread == event_thr);
721 linux_arch_setup_thread (event_thr);
722
723 /* Set the event status. */
724 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
725 event_lwp->waitstatus.value.execd_pathname
726 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
727
728 /* Mark the exec status as pending. */
729 event_lwp->stopped = 1;
730 event_lwp->status_pending_p = 1;
731 event_lwp->status_pending = wstat;
732 event_thr->last_resume_kind = resume_continue;
733 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
734
735 /* Update syscall state in the new lwp, effectively mid-syscall too. */
736 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
737
738 /* Restore the list to catch. Don't rely on the client, which is free
739 to avoid sending a new list when the architecture doesn't change.
740 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
741 proc->syscalls_to_catch = std::move (syscalls_to_catch);
742
743 /* Report the event. */
744 *orig_event_lwp = event_lwp;
745 return 0;
746 }
747
748 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
749 }
750
751 /* Return the PC as read from the regcache of LWP, without any
752 adjustment. */
753
754 static CORE_ADDR
755 get_pc (struct lwp_info *lwp)
756 {
757 struct thread_info *saved_thread;
758 struct regcache *regcache;
759 CORE_ADDR pc;
760
761 if (the_low_target.get_pc == NULL)
762 return 0;
763
764 saved_thread = current_thread;
765 current_thread = get_lwp_thread (lwp);
766
767 regcache = get_thread_regcache (current_thread, 1);
768 pc = (*the_low_target.get_pc) (regcache);
769
770 if (debug_threads)
771 debug_printf ("pc is 0x%lx\n", (long) pc);
772
773 current_thread = saved_thread;
774 return pc;
775 }
776
777 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
778 Fill *SYSNO with the syscall nr trapped. */
779
780 static void
781 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
782 {
783 struct thread_info *saved_thread;
784 struct regcache *regcache;
785
786 if (the_low_target.get_syscall_trapinfo == NULL)
787 {
788 /* If we cannot get the syscall trapinfo, report an unknown
789 system call number. */
790 *sysno = UNKNOWN_SYSCALL;
791 return;
792 }
793
794 saved_thread = current_thread;
795 current_thread = get_lwp_thread (lwp);
796
797 regcache = get_thread_regcache (current_thread, 1);
798 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
799
800 if (debug_threads)
801 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
802
803 current_thread = saved_thread;
804 }
805
806 static int check_stopped_by_watchpoint (struct lwp_info *child);
807
808 /* Called when the LWP stopped for a signal/trap. If it stopped for a
809 trap check what caused it (breakpoint, watchpoint, trace, etc.),
810 and save the result in the LWP's stop_reason field. If it stopped
811 for a breakpoint, decrement the PC if necessary on the lwp's
812 architecture. Returns true if we now have the LWP's stop PC. */
813
814 static int
815 save_stop_reason (struct lwp_info *lwp)
816 {
817 CORE_ADDR pc;
818 CORE_ADDR sw_breakpoint_pc;
819 struct thread_info *saved_thread;
820 #if USE_SIGTRAP_SIGINFO
821 siginfo_t siginfo;
822 #endif
823
824 if (the_low_target.get_pc == NULL)
825 return 0;
826
827 pc = get_pc (lwp);
828 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
829
830 /* breakpoint_at reads from the current thread. */
831 saved_thread = current_thread;
832 current_thread = get_lwp_thread (lwp);
833
834 #if USE_SIGTRAP_SIGINFO
835 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
836 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
837 {
838 if (siginfo.si_signo == SIGTRAP)
839 {
840 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
841 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
842 {
843 /* The si_code is ambiguous on this arch -- check debug
844 registers. */
845 if (!check_stopped_by_watchpoint (lwp))
846 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
847 }
848 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
849 {
850 /* If we determine the LWP stopped for a SW breakpoint,
851 trust it. Particularly don't check watchpoint
852 registers, because at least on s390, we'd find
853 stopped-by-watchpoint as long as there's a watchpoint
854 set. */
855 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
856 }
857 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
858 {
859 /* This can indicate either a hardware breakpoint or
860 hardware watchpoint. Check debug registers. */
861 if (!check_stopped_by_watchpoint (lwp))
862 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
863 }
864 else if (siginfo.si_code == TRAP_TRACE)
865 {
866 /* We may have single stepped an instruction that
867 triggered a watchpoint. In that case, on some
868 architectures (such as x86), instead of TRAP_HWBKPT,
869 si_code indicates TRAP_TRACE, and we need to check
870 the debug registers separately. */
871 if (!check_stopped_by_watchpoint (lwp))
872 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
873 }
874 }
875 }
876 #else
877 /* We may have just stepped a breakpoint instruction. E.g., in
878 non-stop mode, GDB first tells the thread A to step a range, and
879 then the user inserts a breakpoint inside the range. In that
880 case we need to report the breakpoint PC. */
881 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
882 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
883 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
884
885 if (hardware_breakpoint_inserted_here (pc))
886 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
887
888 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
889 check_stopped_by_watchpoint (lwp);
890 #endif
891
892 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
893 {
894 if (debug_threads)
895 {
896 struct thread_info *thr = get_lwp_thread (lwp);
897
898 debug_printf ("CSBB: %s stopped by software breakpoint\n",
899 target_pid_to_str (ptid_of (thr)));
900 }
901
902 /* Back up the PC if necessary. */
903 if (pc != sw_breakpoint_pc)
904 {
905 struct regcache *regcache
906 = get_thread_regcache (current_thread, 1);
907 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
908 }
909
910 /* Update this so we record the correct stop PC below. */
911 pc = sw_breakpoint_pc;
912 }
913 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
914 {
915 if (debug_threads)
916 {
917 struct thread_info *thr = get_lwp_thread (lwp);
918
919 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
920 target_pid_to_str (ptid_of (thr)));
921 }
922 }
923 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
924 {
925 if (debug_threads)
926 {
927 struct thread_info *thr = get_lwp_thread (lwp);
928
929 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
930 target_pid_to_str (ptid_of (thr)));
931 }
932 }
933 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
934 {
935 if (debug_threads)
936 {
937 struct thread_info *thr = get_lwp_thread (lwp);
938
939 debug_printf ("CSBB: %s stopped by trace\n",
940 target_pid_to_str (ptid_of (thr)));
941 }
942 }
943
944 lwp->stop_pc = pc;
945 current_thread = saved_thread;
946 return 1;
947 }
948
949 static struct lwp_info *
950 add_lwp (ptid_t ptid)
951 {
952 struct lwp_info *lwp;
953
954 lwp = XCNEW (struct lwp_info);
955
956 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
957
958 lwp->thread = add_thread (ptid, lwp);
959
960 if (the_low_target.new_thread != NULL)
961 the_low_target.new_thread (lwp);
962
963 return lwp;
964 }
965
966 /* Callback to be used when calling fork_inferior, responsible for
967 actually initiating the tracing of the inferior. */
968
969 static void
970 linux_ptrace_fun ()
971 {
972 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
973 (PTRACE_TYPE_ARG4) 0) < 0)
974 trace_start_error_with_name ("ptrace");
975
976 if (setpgid (0, 0) < 0)
977 trace_start_error_with_name ("setpgid");
978
979 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
980 stdout to stderr so that inferior i/o doesn't corrupt the connection.
981 Also, redirect stdin to /dev/null. */
982 if (remote_connection_is_stdio ())
983 {
984 if (close (0) < 0)
985 trace_start_error_with_name ("close");
986 if (open ("/dev/null", O_RDONLY) < 0)
987 trace_start_error_with_name ("open");
988 if (dup2 (2, 1) < 0)
989 trace_start_error_with_name ("dup2");
990 if (write (2, "stdin/stdout redirected\n",
991 sizeof ("stdin/stdout redirected\n") - 1) < 0)
992 {
993 /* Errors ignored. */;
994 }
995 }
996 }
997
998 /* Start an inferior process and return its pid.
999 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
1000 are its arguments. */
1001
1002 static int
1003 linux_create_inferior (const char *program,
1004 const std::vector<char *> &program_args)
1005 {
1006 client_state &cs = get_client_state ();
1007 struct lwp_info *new_lwp;
1008 int pid;
1009 ptid_t ptid;
1010
1011 {
1012 maybe_disable_address_space_randomization restore_personality
1013 (cs.disable_randomization);
1014 std::string str_program_args = stringify_argv (program_args);
1015
1016 pid = fork_inferior (program,
1017 str_program_args.c_str (),
1018 get_environ ()->envp (), linux_ptrace_fun,
1019 NULL, NULL, NULL, NULL);
1020 }
1021
1022 linux_add_process (pid, 0);
1023
1024 ptid = ptid_t (pid, pid, 0);
1025 new_lwp = add_lwp (ptid);
1026 new_lwp->must_set_ptrace_flags = 1;
1027
1028 post_fork_inferior (pid, program);
1029
1030 return pid;
1031 }
1032
1033 /* Implement the post_create_inferior target_ops method. */
1034
1035 static void
1036 linux_post_create_inferior (void)
1037 {
1038 struct lwp_info *lwp = get_thread_lwp (current_thread);
1039
1040 linux_arch_setup ();
1041
1042 if (lwp->must_set_ptrace_flags)
1043 {
1044 struct process_info *proc = current_process ();
1045 int options = linux_low_ptrace_options (proc->attached);
1046
1047 linux_enable_event_reporting (lwpid_of (current_thread), options);
1048 lwp->must_set_ptrace_flags = 0;
1049 }
1050 }
1051
1052 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1053 error. */
1054
1055 int
1056 linux_attach_lwp (ptid_t ptid)
1057 {
1058 struct lwp_info *new_lwp;
1059 int lwpid = ptid.lwp ();
1060
1061 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1062 != 0)
1063 return errno;
1064
1065 new_lwp = add_lwp (ptid);
1066
1067 /* We need to wait for SIGSTOP before being able to make the next
1068 ptrace call on this LWP. */
1069 new_lwp->must_set_ptrace_flags = 1;
1070
1071 if (linux_proc_pid_is_stopped (lwpid))
1072 {
1073 if (debug_threads)
1074 debug_printf ("Attached to a stopped process\n");
1075
1076 /* The process is definitely stopped. It is in a job control
1077 stop, unless the kernel predates the TASK_STOPPED /
1078 TASK_TRACED distinction, in which case it might be in a
1079 ptrace stop. Make sure it is in a ptrace stop; from there we
1080 can kill it, signal it, et cetera.
1081
1082 First make sure there is a pending SIGSTOP. Since we are
1083 already attached, the process can not transition from stopped
1084 to running without a PTRACE_CONT; so we know this signal will
1085 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1086 probably already in the queue (unless this kernel is old
1087 enough to use TASK_STOPPED for ptrace stops); but since
1088 SIGSTOP is not an RT signal, it can only be queued once. */
1089 kill_lwp (lwpid, SIGSTOP);
1090
1091 /* Finally, resume the stopped process. This will deliver the
1092 SIGSTOP (or a higher priority signal, just like normal
1093 PTRACE_ATTACH), which we'll catch later on. */
1094 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1095 }
1096
1097 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1098 brings it to a halt.
1099
1100 There are several cases to consider here:
1101
1102 1) gdbserver has already attached to the process and is being notified
1103 of a new thread that is being created.
1104 In this case we should ignore that SIGSTOP and resume the
1105 process. This is handled below by setting stop_expected = 1,
1106 and the fact that add_thread sets last_resume_kind ==
1107 resume_continue.
1108
1109 2) This is the first thread (the process thread), and we're attaching
1110 to it via attach_inferior.
1111 In this case we want the process thread to stop.
1112 This is handled by having linux_attach set last_resume_kind ==
1113 resume_stop after we return.
1114
1115 If the pid we are attaching to is also the tgid, we attach to and
1116 stop all the existing threads. Otherwise, we attach to pid and
1117 ignore any other threads in the same group as this pid.
1118
1119 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1120 existing threads.
1121 In this case we want the thread to stop.
1122 FIXME: This case is currently not properly handled.
1123 We should wait for the SIGSTOP but don't. Things work apparently
1124 because enough time passes between when we ptrace (ATTACH) and when
1125 gdb makes the next ptrace call on the thread.
1126
1127 On the other hand, if we are currently trying to stop all threads, we
1128 should treat the new thread as if we had sent it a SIGSTOP. This works
1129 because we are guaranteed that the add_lwp call above added us to the
1130 end of the list, and so the new thread has not yet reached
1131 wait_for_sigstop (but will). */
1132 new_lwp->stop_expected = 1;
1133
1134 return 0;
1135 }
1136
1137 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1138 already attached. Returns true if a new LWP is found, false
1139 otherwise. */
1140
1141 static int
1142 attach_proc_task_lwp_callback (ptid_t ptid)
1143 {
1144 /* Is this a new thread? */
1145 if (find_thread_ptid (ptid) == NULL)
1146 {
1147 int lwpid = ptid.lwp ();
1148 int err;
1149
1150 if (debug_threads)
1151 debug_printf ("Found new lwp %d\n", lwpid);
1152
1153 err = linux_attach_lwp (ptid);
1154
1155 /* Be quiet if we simply raced with the thread exiting. EPERM
1156 is returned if the thread's task still exists, and is marked
1157 as exited or zombie, as well as other conditions, so in that
1158 case, confirm the status in /proc/PID/status. */
1159 if (err == ESRCH
1160 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1161 {
1162 if (debug_threads)
1163 {
1164 debug_printf ("Cannot attach to lwp %d: "
1165 "thread is gone (%d: %s)\n",
1166 lwpid, err, strerror (err));
1167 }
1168 }
1169 else if (err != 0)
1170 {
1171 std::string reason
1172 = linux_ptrace_attach_fail_reason_string (ptid, err);
1173
1174 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1175 }
1176
1177 return 1;
1178 }
1179 return 0;
1180 }
1181
1182 static void async_file_mark (void);
1183
1184 /* Attach to PID. If PID is the tgid, attach to it and all
1185 of its threads. */
1186
1187 static int
1188 linux_attach (unsigned long pid)
1189 {
1190 struct process_info *proc;
1191 struct thread_info *initial_thread;
1192 ptid_t ptid = ptid_t (pid, pid, 0);
1193 int err;
1194
1195 proc = linux_add_process (pid, 1);
1196
1197 /* Attach to PID. We will check for other threads
1198 soon. */
1199 err = linux_attach_lwp (ptid);
1200 if (err != 0)
1201 {
1202 remove_process (proc);
1203
1204 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1205 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1206 }
1207
1208 /* Don't ignore the initial SIGSTOP if we just attached to this
1209 process. It will be collected by wait shortly. */
1210 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
1211 initial_thread->last_resume_kind = resume_stop;
1212
1213 /* We must attach to every LWP. If /proc is mounted, use that to
1214 find them now. On the one hand, the inferior may be using raw
1215 clone instead of using pthreads. On the other hand, even if it
1216 is using pthreads, GDB may not be connected yet (thread_db needs
1217 to do symbol lookups, through qSymbol). Also, thread_db walks
1218 structures in the inferior's address space to find the list of
1219 threads/LWPs, and those structures may well be corrupted. Note
1220 that once thread_db is loaded, we'll still use it to list threads
1221 and associate pthread info with each LWP. */
1222 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1223
1224 /* GDB will shortly read the xml target description for this
1225 process, to figure out the process' architecture. But the target
1226 description is only filled in when the first process/thread in
1227 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1228 that now, otherwise, if GDB is fast enough, it could read the
1229 target description _before_ that initial stop. */
1230 if (non_stop)
1231 {
1232 struct lwp_info *lwp;
1233 int wstat, lwpid;
1234 ptid_t pid_ptid = ptid_t (pid);
1235
1236 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1237 &wstat, __WALL);
1238 gdb_assert (lwpid > 0);
1239
1240 lwp = find_lwp_pid (ptid_t (lwpid));
1241
1242 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1243 {
1244 lwp->status_pending_p = 1;
1245 lwp->status_pending = wstat;
1246 }
1247
1248 initial_thread->last_resume_kind = resume_continue;
1249
1250 async_file_mark ();
1251
1252 gdb_assert (proc->tdesc != NULL);
1253 }
1254
1255 return 0;
1256 }
1257
1258 static int
1259 last_thread_of_process_p (int pid)
1260 {
1261 bool seen_one = false;
1262
1263 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1264 {
1265 if (!seen_one)
1266 {
1267 /* This is the first thread of this process we see. */
1268 seen_one = true;
1269 return false;
1270 }
1271 else
1272 {
1273 /* This is the second thread of this process we see. */
1274 return true;
1275 }
1276 });
1277
1278 return thread == NULL;
1279 }
1280
1281 /* Kill LWP. */
1282
1283 static void
1284 linux_kill_one_lwp (struct lwp_info *lwp)
1285 {
1286 struct thread_info *thr = get_lwp_thread (lwp);
1287 int pid = lwpid_of (thr);
1288
1289 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1290 there is no signal context, and ptrace(PTRACE_KILL) (or
1291 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1292 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1293 alternative is to kill with SIGKILL. We only need one SIGKILL
1294 per process, not one for each thread. But since we still
1295 support debugging programs using raw clone without CLONE_THREAD,
1296 we send one for each thread. For years, we used PTRACE_KILL
1297 only, so we're being a bit paranoid about some old kernels where
1298 PTRACE_KILL might work better (dubious if there are any such, but
1299 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1300 second, and so we're fine everywhere. */
1301
1302 errno = 0;
1303 kill_lwp (pid, SIGKILL);
1304 if (debug_threads)
1305 {
1306 int save_errno = errno;
1307
1308 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1309 target_pid_to_str (ptid_of (thr)),
1310 save_errno ? strerror (save_errno) : "OK");
1311 }
1312
1313 errno = 0;
1314 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1315 if (debug_threads)
1316 {
1317 int save_errno = errno;
1318
1319 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1320 target_pid_to_str (ptid_of (thr)),
1321 save_errno ? strerror (save_errno) : "OK");
1322 }
1323 }
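
/* kill_lwp (defined later in this file) must target a single thread
   rather than the whole thread group, so conceptually it wraps the
   tkill syscall instead of plain kill.  A sketch under that
   assumption:

     #include <sys/syscall.h>

     static int
     example_kill_lwp (unsigned long lwpid, int signo)
     {
       errno = 0;
       return syscall (__NR_tkill, lwpid, signo);
     }

   Like the real helper, the sketch reports failure via -1/errno,
   which is why the caller above clears and then inspects errno
   around each kill_lwp call.  */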
1324
1325 /* Kill LWP and wait for it to die. */
1326
1327 static void
1328 kill_wait_lwp (struct lwp_info *lwp)
1329 {
1330 struct thread_info *thr = get_lwp_thread (lwp);
1331 int pid = ptid_of (thr).pid ();
1332 int lwpid = ptid_of (thr).lwp ();
1333 int wstat;
1334 int res;
1335
1336 if (debug_threads)
1337 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1338
1339 do
1340 {
1341 linux_kill_one_lwp (lwp);
1342
1343 /* Make sure it died. Notes:
1344
1345 - The loop is most likely unnecessary.
1346
1347 - We don't use linux_wait_for_event as that could delete lwps
1348 while we're iterating over them. We're not interested in
1349 any pending status at this point, only in making sure all
1350 wait status on the kernel side are collected until the
1351 process is reaped.
1352
1353 - We don't use __WALL here as the __WALL emulation relies on
1354 SIGCHLD, and killing a stopped process doesn't generate
1355 one, nor an exit status.
1356 */
1357 res = my_waitpid (lwpid, &wstat, 0);
1358 if (res == -1 && errno == ECHILD)
1359 res = my_waitpid (lwpid, &wstat, __WCLONE);
1360 } while (res > 0 && WIFSTOPPED (wstat));
1361
1362 /* Even if it was stopped, the child may have already disappeared.
1363 E.g., if it was killed by SIGKILL. */
1364 if (res < 0 && errno != ECHILD)
1365 perror_with_name ("kill_wait_lwp");
1366 }
1367
1368 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1369 except the leader. */
1370
1371 static void
1372 kill_one_lwp_callback (thread_info *thread, int pid)
1373 {
1374 struct lwp_info *lwp = get_thread_lwp (thread);
1375
1376 /* We avoid killing the first thread here, because of a Linux kernel (at
1377 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1378 the children get a chance to be reaped, it will remain a zombie
1379 forever. */
1380
1381 if (lwpid_of (thread) == pid)
1382 {
1383 if (debug_threads)
1384 debug_printf ("lkop: is last of process %s\n",
1385 target_pid_to_str (thread->id));
1386 return;
1387 }
1388
1389 kill_wait_lwp (lwp);
1390 }
1391
1392 static int
1393 linux_kill (process_info *process)
1394 {
1395 int pid = process->pid;
1396
1397 /* If we're killing a running inferior, make sure it is stopped
1398 first, as PTRACE_KILL will not work otherwise. */
1399 stop_all_lwps (0, NULL);
1400
1401 for_each_thread (pid, [&] (thread_info *thread)
1402 {
1403 kill_one_lwp_callback (thread, pid);
1404 });
1405
1406 /* See the comment in linux_kill_one_lwp. We did not kill the first
1407 thread in the list, so do so now. */
1408 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1409
1410 if (lwp == NULL)
1411 {
1412 if (debug_threads)
1413 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1414 pid);
1415 }
1416 else
1417 kill_wait_lwp (lwp);
1418
1419 the_target->mourn (process);
1420
1421 /* Since we presently can only stop all lwps of all processes, we
1422 need to unstop lwps of other processes. */
1423 unstop_all_lwps (0, NULL);
1424 return 0;
1425 }
1426
1427 /* Get pending signal of THREAD, for detaching purposes. This is the
1428 signal the thread last stopped for, which we need to deliver to the
1429 thread when detaching, otherwise, it'd be suppressed/lost. */
1430
1431 static int
1432 get_detach_signal (struct thread_info *thread)
1433 {
1434 client_state &cs = get_client_state ();
1435 enum gdb_signal signo = GDB_SIGNAL_0;
1436 int status;
1437 struct lwp_info *lp = get_thread_lwp (thread);
1438
1439 if (lp->status_pending_p)
1440 status = lp->status_pending;
1441 else
1442 {
1443 /* If the thread had been suspended by gdbserver, and it stopped
1444 cleanly, then it'll have stopped with SIGSTOP. But we don't
1445 want to deliver that SIGSTOP. */
1446 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1447 || thread->last_status.value.sig == GDB_SIGNAL_0)
1448 return 0;
1449
1450 /* Otherwise, we may need to deliver the signal we
1451 intercepted. */
1452 status = lp->last_status;
1453 }
1454
1455 if (!WIFSTOPPED (status))
1456 {
1457 if (debug_threads)
1458 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1459 target_pid_to_str (ptid_of (thread)));
1460 return 0;
1461 }
1462
1463 /* Extended wait statuses aren't real SIGTRAPs. */
1464 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1465 {
1466 if (debug_threads)
1467 debug_printf ("GPS: lwp %s had stopped with extended "
1468 "status: no pending signal\n",
1469 target_pid_to_str (ptid_of (thread)));
1470 return 0;
1471 }
1472
1473 signo = gdb_signal_from_host (WSTOPSIG (status));
1474
1475 if (cs.program_signals_p && !cs.program_signals[signo])
1476 {
1477 if (debug_threads)
1478 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1479 target_pid_to_str (ptid_of (thread)),
1480 gdb_signal_to_string (signo));
1481 return 0;
1482 }
1483 else if (!cs.program_signals_p
1484 /* If we have no way to know which signals GDB does not
1485 want to have passed to the program, assume
1486 SIGTRAP/SIGINT, which is GDB's default. */
1487 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1488 {
1489 if (debug_threads)
1490 debug_printf ("GPS: lwp %s had signal %s, "
1491 "but we don't know if we should pass it. "
1492 "Default to not.\n",
1493 target_pid_to_str (ptid_of (thread)),
1494 gdb_signal_to_string (signo));
1495 return 0;
1496 }
1497 else
1498 {
1499 if (debug_threads)
1500 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1501 target_pid_to_str (ptid_of (thread)),
1502 gdb_signal_to_string (signo));
1503
1504 return WSTOPSIG (status);
1505 }
1506 }
1507
1508 /* Detach from LWP. */
1509
1510 static void
1511 linux_detach_one_lwp (struct lwp_info *lwp)
1512 {
1513 struct thread_info *thread = get_lwp_thread (lwp);
1514 int sig;
1515 int lwpid;
1516
1517 /* If there is a pending SIGSTOP, get rid of it. */
1518 if (lwp->stop_expected)
1519 {
1520 if (debug_threads)
1521 debug_printf ("Sending SIGCONT to %s\n",
1522 target_pid_to_str (ptid_of (thread)));
1523
1524 kill_lwp (lwpid_of (thread), SIGCONT);
1525 lwp->stop_expected = 0;
1526 }
1527
1528 /* Pass on any pending signal for this thread. */
1529 sig = get_detach_signal (thread);
1530
1531 /* Preparing to resume may try to write registers, and fail if the
1532 lwp is zombie. If that happens, ignore the error. We'll handle
1533 it below, when detach fails with ESRCH. */
1534 TRY
1535 {
1536 /* Flush any pending changes to the process's registers. */
1537 regcache_invalidate_thread (thread);
1538
1539 /* Finally, let it resume. */
1540 if (the_low_target.prepare_to_resume != NULL)
1541 the_low_target.prepare_to_resume (lwp);
1542 }
1543 CATCH (ex, RETURN_MASK_ERROR)
1544 {
1545 if (!check_ptrace_stopped_lwp_gone (lwp))
1546 throw_exception (ex);
1547 }
1548 END_CATCH
1549
1550 lwpid = lwpid_of (thread);
1551 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1552 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1553 {
1554 int save_errno = errno;
1555
1556 /* We know the thread exists, so ESRCH must mean the lwp is
1557 zombie. This can happen if one of the already-detached
1558 threads exits the whole thread group. In that case we're
1559 still attached, and must reap the lwp. */
1560 if (save_errno == ESRCH)
1561 {
1562 int ret, status;
1563
1564 ret = my_waitpid (lwpid, &status, __WALL);
1565 if (ret == -1)
1566 {
1567 warning (_("Couldn't reap LWP %d while detaching: %s"),
1568 lwpid, strerror (errno));
1569 }
1570 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1571 {
1572 warning (_("Reaping LWP %d while detaching "
1573 "returned unexpected status 0x%x"),
1574 lwpid, status);
1575 }
1576 }
1577 else
1578 {
1579 error (_("Can't detach %s: %s"),
1580 target_pid_to_str (ptid_of (thread)),
1581 strerror (save_errno));
1582 }
1583 }
1584 else if (debug_threads)
1585 {
1586 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1587 target_pid_to_str (ptid_of (thread)),
1588 strsignal (sig));
1589 }
1590
1591 delete_lwp (lwp);
1592 }
1593
1594 /* Callback for for_each_thread. Detaches from non-leader threads of a
1595 given process. */
1596
1597 static void
1598 linux_detach_lwp_callback (thread_info *thread)
1599 {
1600 /* We don't actually detach from the thread group leader just yet.
1601 If the thread group exits, we must reap the zombie clone lwps
1602 before we're able to reap the leader. */
1603 if (thread->id.pid () == thread->id.lwp ())
1604 return;
1605
1606 lwp_info *lwp = get_thread_lwp (thread);
1607 linux_detach_one_lwp (lwp);
1608 }
1609
1610 static int
1611 linux_detach (process_info *process)
1612 {
1613 struct lwp_info *main_lwp;
1614
1615 /* If there's a step over already in progress, let it finish first;
1616 otherwise nesting a stabilize_threads operation on top gets real
1617 messy. */
1618 complete_ongoing_step_over ();
1619
1620 /* Stop all threads before detaching. First, ptrace requires that
1621 the thread is stopped to successfully detach. Second, thread_db
1622 may need to uninstall thread event breakpoints from memory, which
1623 only works with a stopped process anyway. */
1624 stop_all_lwps (0, NULL);
1625
1626 #ifdef USE_THREAD_DB
1627 thread_db_detach (process);
1628 #endif
1629
1630 /* Stabilize threads (move out of jump pads). */
1631 stabilize_threads ();
1632
1633 /* Detach from the clone lwps first. If the thread group exits just
1634 while we're detaching, we must reap the clone lwps before we're
1635 able to reap the leader. */
1636 for_each_thread (process->pid, linux_detach_lwp_callback);
1637
1638 main_lwp = find_lwp_pid (ptid_t (process->pid));
1639 linux_detach_one_lwp (main_lwp);
1640
1641 the_target->mourn (process);
1642
1643 /* Since we presently can only stop all lwps of all processes, we
1644 need to unstop lwps of other processes. */
1645 unstop_all_lwps (0, NULL);
1646 return 0;
1647 }
1648
1649 /* Remove all LWPs that belong to process PROC from the lwp list. */
1650
1651 static void
1652 linux_mourn (struct process_info *process)
1653 {
1654 struct process_info_private *priv;
1655
1656 #ifdef USE_THREAD_DB
1657 thread_db_mourn (process);
1658 #endif
1659
1660 for_each_thread (process->pid, [] (thread_info *thread)
1661 {
1662 delete_lwp (get_thread_lwp (thread));
1663 });
1664
1665 /* Freeing all private data. */
1666 priv = process->priv;
1667 if (the_low_target.delete_process != NULL)
1668 the_low_target.delete_process (priv->arch_private);
1669 else
1670 gdb_assert (priv->arch_private == NULL);
1671 free (priv);
1672 process->priv = NULL;
1673
1674 remove_process (process);
1675 }
1676
1677 static void
1678 linux_join (int pid)
1679 {
1680 int status, ret;
1681
1682 do {
1683 ret = my_waitpid (pid, &status, 0);
1684 if (WIFEXITED (status) || WIFSIGNALED (status))
1685 break;
1686 } while (ret != -1 || errno != ECHILD);
1687 }
1688
1689 /* Return nonzero if the given thread is still alive. */
1690 static int
1691 linux_thread_alive (ptid_t ptid)
1692 {
1693 struct lwp_info *lwp = find_lwp_pid (ptid);
1694
1695 /* We assume we always know if a thread exits. If a whole process
1696 exited but we still haven't been able to report it to GDB, we'll
1697 hold on to the last lwp of the dead process. */
1698 if (lwp != NULL)
1699 return !lwp_is_marked_dead (lwp);
1700 else
1701 return 0;
1702 }
1703
1704 /* Return 1 if this lwp still has an interesting status pending. If
1705 not (e.g., it had stopped for a breakpoint that is gone), return
1706 0. */
1707
1708 static int
1709 thread_still_has_status_pending_p (struct thread_info *thread)
1710 {
1711 struct lwp_info *lp = get_thread_lwp (thread);
1712
1713 if (!lp->status_pending_p)
1714 return 0;
1715
1716 if (thread->last_resume_kind != resume_stop
1717 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1718 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1719 {
1720 struct thread_info *saved_thread;
1721 CORE_ADDR pc;
1722 int discard = 0;
1723
1724 gdb_assert (lp->last_status != 0);
1725
1726 pc = get_pc (lp);
1727
1728 saved_thread = current_thread;
1729 current_thread = thread;
1730
1731 if (pc != lp->stop_pc)
1732 {
1733 if (debug_threads)
1734 debug_printf ("PC of %ld changed\n",
1735 lwpid_of (thread));
1736 discard = 1;
1737 }
1738
1739 #if !USE_SIGTRAP_SIGINFO
1740 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1741 && !(*the_low_target.breakpoint_at) (pc))
1742 {
1743 if (debug_threads)
1744 debug_printf ("previous SW breakpoint of %ld gone\n",
1745 lwpid_of (thread));
1746 discard = 1;
1747 }
1748 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1749 && !hardware_breakpoint_inserted_here (pc))
1750 {
1751 if (debug_threads)
1752 debug_printf ("previous HW breakpoint of %ld gone\n",
1753 lwpid_of (thread));
1754 discard = 1;
1755 }
1756 #endif
1757
1758 current_thread = saved_thread;
1759
1760 if (discard)
1761 {
1762 if (debug_threads)
1763 debug_printf ("discarding pending breakpoint status\n");
1764 lp->status_pending_p = 0;
1765 return 0;
1766 }
1767 }
1768
1769 return 1;
1770 }
1771
1772 /* Returns true if LWP is resumed from the client's perspective. */
1773
1774 static int
1775 lwp_resumed (struct lwp_info *lwp)
1776 {
1777 struct thread_info *thread = get_lwp_thread (lwp);
1778
1779 if (thread->last_resume_kind != resume_stop)
1780 return 1;
1781
1782 /* Did gdb send us a `vCont;t', but we haven't reported the
1783 corresponding stop to gdb yet? If so, the thread is still
1784 resumed/running from gdb's perspective. */
1785 if (thread->last_resume_kind == resume_stop
1786 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1787 return 1;
1788
1789 return 0;
1790 }
1791
1792 /* Return true if this lwp has an interesting status pending. */
1793 static bool
1794 status_pending_p_callback (thread_info *thread, ptid_t ptid)
1795 {
1796 struct lwp_info *lp = get_thread_lwp (thread);
1797
1798 /* Check if we're only interested in events from a specific process
1799 or a specific LWP. */
1800 if (!thread->id.matches (ptid))
1801 return 0;
1802
1803 if (!lwp_resumed (lp))
1804 return 0;
1805
1806 if (lp->status_pending_p
1807 && !thread_still_has_status_pending_p (thread))
1808 {
1809 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1810 return 0;
1811 }
1812
1813 return lp->status_pending_p;
1814 }
1815
1816 struct lwp_info *
1817 find_lwp_pid (ptid_t ptid)
1818 {
1819 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1820 {
1821 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1822 return thr_arg->id.lwp () == lwp;
1823 });
1824
1825 if (thread == NULL)
1826 return NULL;
1827
1828 return get_thread_lwp (thread);
1829 }
1830
1831 /* Return the number of known LWPs in the tgid given by PID. */
1832
1833 static int
1834 num_lwps (int pid)
1835 {
1836 int count = 0;
1837
1838 for_each_thread (pid, [&] (thread_info *thread)
1839 {
1840 count++;
1841 });
1842
1843 return count;
1844 }
1845
1846 /* See nat/linux-nat.h. */
1847
1848 struct lwp_info *
1849 iterate_over_lwps (ptid_t filter,
1850 gdb::function_view<iterate_over_lwps_ftype> callback)
1851 {
1852 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1853 {
1854 lwp_info *lwp = get_thread_lwp (thr_arg);
1855
1856 return callback (lwp);
1857 });
1858
1859 if (thread == NULL)
1860 return NULL;
1861
1862 return get_thread_lwp (thread);
1863 }
1864
1865 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1866 their exits until all other threads in the group have exited. */
1867
1868 static void
1869 check_zombie_leaders (void)
1870 {
1871 for_each_process ([] (process_info *proc) {
1872 pid_t leader_pid = pid_of (proc);
1873 struct lwp_info *leader_lp;
1874
1875 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1876
1877 if (debug_threads)
1878 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1879 "num_lwps=%d, zombie=%d\n",
1880 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1881 linux_proc_pid_is_zombie (leader_pid));
1882
1883 if (leader_lp != NULL && !leader_lp->stopped
1884 /* Check if there are other threads in the group, as we may
1885 have raced with the inferior simply exiting. */
1886 && !last_thread_of_process_p (leader_pid)
1887 && linux_proc_pid_is_zombie (leader_pid))
1888 {
1889 /* A leader zombie can mean one of two things:
1890
1891 - It exited, and there's an exit status pending and
1892 available, or only the leader exited (not the whole
1893 program). In the latter case, we can't waitpid the
1894 leader's exit status until all other threads are gone.
1895
1896 - There are 3 or more threads in the group, and a thread
1897 other than the leader exec'd. On an exec, the Linux
1898 kernel destroys all other threads (except the execing
1899 one) in the thread group, and resets the execing thread's
1900 tid to the tgid. No exit notification is sent for the
1901 execing thread -- from the ptracer's perspective, it
1902 appears as though the execing thread just vanishes.
1903 Until we reap all other threads except the leader and the
1904 execing thread, the leader will be zombie, and the
1905 execing thread will be in `D (disc sleep)'. As soon as
1906 all other threads are reaped, the execing thread changes
1907 its tid to the tgid, and the previous (zombie) leader
1908 vanishes, giving place to the "new" leader. We could try
1909 distinguishing the exit and exec cases, by waiting once
1910 more, and seeing if something comes out, but it doesn't
1911 sound useful. The previous leader _does_ go away, and
1912 we'll re-add the new one once we see the exec event
1913 (which is just the same as what would happen if the
1914 previous leader did exit voluntarily before some other
1915 thread execs). */
1916
1917 if (debug_threads)
1918 debug_printf ("CZL: Thread group leader %d zombie "
1919 "(it exited, or another thread execd).\n",
1920 leader_pid);
1921
1922 delete_lwp (leader_lp);
1923 }
1924 });
1925 }
1926
1927 /* Callback for `find_thread'. Returns the first LWP that is not
1928 stopped. */
1929
1930 static bool
1931 not_stopped_callback (thread_info *thread, ptid_t filter)
1932 {
1933 if (!thread->id.matches (filter))
1934 return false;
1935
1936 lwp_info *lwp = get_thread_lwp (thread);
1937
1938 return !lwp->stopped;
1939 }
1940
1941 /* Increment LWP's suspend count. */
1942
1943 static void
1944 lwp_suspended_inc (struct lwp_info *lwp)
1945 {
1946 lwp->suspended++;
1947
1948 if (debug_threads && lwp->suspended > 4)
1949 {
1950 struct thread_info *thread = get_lwp_thread (lwp);
1951
1952 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1953 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1954 }
1955 }
1956
1957 /* Decrement LWP's suspend count. */
1958
1959 static void
1960 lwp_suspended_decr (struct lwp_info *lwp)
1961 {
1962 lwp->suspended--;
1963
1964 if (lwp->suspended < 0)
1965 {
1966 struct thread_info *thread = get_lwp_thread (lwp);
1967
1968 internal_error (__FILE__, __LINE__,
1969 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1970 lwp->suspended);
1971 }
1972 }
1973
1974 /* This function should only be called if the LWP got a SIGTRAP.
1975
1976 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1977 event was handled, 0 otherwise. */
1978
1979 static int
1980 handle_tracepoints (struct lwp_info *lwp)
1981 {
1982 struct thread_info *tinfo = get_lwp_thread (lwp);
1983 int tpoint_related_event = 0;
1984
1985 gdb_assert (lwp->suspended == 0);
1986
1987 /* If this tracepoint hit causes a tracing stop, we'll immediately
1988 uninsert tracepoints. To do this, we temporarily pause all
1989 threads, unpatch away, and then unpause threads. We need to make
1990 sure the unpausing doesn't resume LWP too. */
1991 lwp_suspended_inc (lwp);
1992
1993 /* And we need to be sure that any all-threads-stopping doesn't try
1994 to move threads out of the jump pads, as it could deadlock the
1995 inferior (LWP could be in the jump pad, maybe even holding the
1996 lock). */
1997
1998 /* Do any necessary step collect actions. */
1999 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2000
2001 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2002
2003 /* See if we just hit a tracepoint and do its main collect
2004 actions. */
2005 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2006
2007 lwp_suspended_decr (lwp);
2008
2009 gdb_assert (lwp->suspended == 0);
2010 gdb_assert (!stabilizing_threads
2011 || (lwp->collecting_fast_tracepoint
2012 != fast_tpoint_collect_result::not_collecting));
2013
2014 if (tpoint_related_event)
2015 {
2016 if (debug_threads)
2017 debug_printf ("got a tracepoint event\n");
2018 return 1;
2019 }
2020
2021 return 0;
2022 }
2023
2024 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2025 collection status. */
2026
2027 static fast_tpoint_collect_result
2028 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2029 struct fast_tpoint_collect_status *status)
2030 {
2031 CORE_ADDR thread_area;
2032 struct thread_info *thread = get_lwp_thread (lwp);
2033
2034 if (the_low_target.get_thread_area == NULL)
2035 return fast_tpoint_collect_result::not_collecting;
2036
2037 /* Get the thread area address. This is used to recognize which
2038 thread is which when tracing with the in-process agent library.
2039 We don't read anything from the address, and treat it as opaque;
2040 it's the address itself that we assume is unique per-thread. */
2041 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2042 return fast_tpoint_collect_result::not_collecting;
2043
2044 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2045 }
2046
2047 /* The reason we resume in the caller is that we want to be able
2048 to pass lwp->status_pending as WSTAT, and we need to clear
2049 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2050 refuses to resume. */
2051
2052 static int
2053 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2054 {
2055 struct thread_info *saved_thread;
2056
2057 saved_thread = current_thread;
2058 current_thread = get_lwp_thread (lwp);
2059
2060 if ((wstat == NULL
2061 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2062 && supports_fast_tracepoints ()
2063 && agent_loaded_p ())
2064 {
2065 struct fast_tpoint_collect_status status;
2066
2067 if (debug_threads)
2068 debug_printf ("Checking whether LWP %ld needs to move out of the "
2069 "jump pad.\n",
2070 lwpid_of (current_thread));
2071
2072 fast_tpoint_collect_result r
2073 = linux_fast_tracepoint_collecting (lwp, &status);
2074
2075 if (wstat == NULL
2076 || (WSTOPSIG (*wstat) != SIGILL
2077 && WSTOPSIG (*wstat) != SIGFPE
2078 && WSTOPSIG (*wstat) != SIGSEGV
2079 && WSTOPSIG (*wstat) != SIGBUS))
2080 {
2081 lwp->collecting_fast_tracepoint = r;
2082
2083 if (r != fast_tpoint_collect_result::not_collecting)
2084 {
2085 if (r == fast_tpoint_collect_result::before_insn
2086 && lwp->exit_jump_pad_bkpt == NULL)
2087 {
2088 /* Haven't executed the original instruction yet.
2089 Set breakpoint there, and wait till it's hit,
2090 then single-step until exiting the jump pad. */
2091 lwp->exit_jump_pad_bkpt
2092 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2093 }
2094
2095 if (debug_threads)
2096 debug_printf ("Checking whether LWP %ld needs to move out of "
2097 "the jump pad...it does\n",
2098 lwpid_of (current_thread));
2099 current_thread = saved_thread;
2100
2101 return 1;
2102 }
2103 }
2104 else
2105 {
2106 /* If we get a synchronous signal while collecting, *and*
2107 while executing the (relocated) original instruction,
2108 reset the PC to point at the tpoint address, before
2109 reporting to GDB. Otherwise, it's an IPA lib bug: just
2110 report the signal to GDB, and pray for the best. */
2111
2112 lwp->collecting_fast_tracepoint
2113 = fast_tpoint_collect_result::not_collecting;
2114
2115 if (r != fast_tpoint_collect_result::not_collecting
2116 && (status.adjusted_insn_addr <= lwp->stop_pc
2117 && lwp->stop_pc < status.adjusted_insn_addr_end))
2118 {
2119 siginfo_t info;
2120 struct regcache *regcache;
2121
2122 /* The si_addr on a few signals references the address
2123 of the faulting instruction. Adjust that as
2124 well. */
2125 if ((WSTOPSIG (*wstat) == SIGILL
2126 || WSTOPSIG (*wstat) == SIGFPE
2127 || WSTOPSIG (*wstat) == SIGBUS
2128 || WSTOPSIG (*wstat) == SIGSEGV)
2129 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2130 (PTRACE_TYPE_ARG3) 0, &info) == 0
2131 /* Final check just to make sure we don't clobber
2132 the siginfo of non-kernel-sent signals. */
2133 && (uintptr_t) info.si_addr == lwp->stop_pc)
2134 {
2135 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2136 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2137 (PTRACE_TYPE_ARG3) 0, &info);
2138 }
2139
2140 regcache = get_thread_regcache (current_thread, 1);
2141 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2142 lwp->stop_pc = status.tpoint_addr;
2143
2144 /* Cancel any fast tracepoint lock this thread was
2145 holding. */
2146 force_unlock_trace_buffer ();
2147 }
2148
2149 if (lwp->exit_jump_pad_bkpt != NULL)
2150 {
2151 if (debug_threads)
2152 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2153 "stopping all threads momentarily.\n");
2154
2155 stop_all_lwps (1, lwp);
2156
2157 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2158 lwp->exit_jump_pad_bkpt = NULL;
2159
2160 unstop_all_lwps (1, lwp);
2161
2162 gdb_assert (lwp->suspended >= 0);
2163 }
2164 }
2165 }
2166
2167 if (debug_threads)
2168 debug_printf ("Checking whether LWP %ld needs to move out of the "
2169 "jump pad...no\n",
2170 lwpid_of (current_thread));
2171
2172 current_thread = saved_thread;
2173 return 0;
2174 }
2175
2176 /* Enqueue one signal in the "signals to report later when out of the
2177 jump pad" list. */
2178
2179 static void
2180 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2181 {
2182 struct pending_signals *p_sig;
2183 struct thread_info *thread = get_lwp_thread (lwp);
2184
2185 if (debug_threads)
2186 debug_printf ("Deferring signal %d for LWP %ld.\n",
2187 WSTOPSIG (*wstat), lwpid_of (thread));
2188
2189 if (debug_threads)
2190 {
2191 struct pending_signals *sig;
2192
2193 for (sig = lwp->pending_signals_to_report;
2194 sig != NULL;
2195 sig = sig->prev)
2196 debug_printf (" Already queued %d\n",
2197 sig->signal);
2198
2199 debug_printf (" (no more currently queued signals)\n");
2200 }
2201
2202 /* Don't enqueue non-RT signals if they are already in the deferred
2203 queue. (SIGSTOP being the easiest signal to see ending up here
2204 twice.) */
2205 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2206 {
2207 struct pending_signals *sig;
2208
2209 for (sig = lwp->pending_signals_to_report;
2210 sig != NULL;
2211 sig = sig->prev)
2212 {
2213 if (sig->signal == WSTOPSIG (*wstat))
2214 {
2215 if (debug_threads)
2216 debug_printf ("Not requeuing already queued non-RT signal %d"
2217 " for LWP %ld\n",
2218 sig->signal,
2219 lwpid_of (thread));
2220 return;
2221 }
2222 }
2223 }
2224
2225 p_sig = XCNEW (struct pending_signals);
2226 p_sig->prev = lwp->pending_signals_to_report;
2227 p_sig->signal = WSTOPSIG (*wstat);
2228
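/* Capture the signal's siginfo now; it is re-installed with
   PTRACE_SETSIGINFO when the deferred signal is finally reported
   (see dequeue_one_deferred_signal).  */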
2229 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2230 &p_sig->info);
2231
2232 lwp->pending_signals_to_report = p_sig;
2233 }
2234
2235 /* Dequeue one signal from the "signals to report later when out of
2236 the jump pad" list. */
2237
2238 static int
2239 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2240 {
2241 struct thread_info *thread = get_lwp_thread (lwp);
2242
2243 if (lwp->pending_signals_to_report != NULL)
2244 {
2245 struct pending_signals **p_sig;
2246
2247 p_sig = &lwp->pending_signals_to_report;
2248 while ((*p_sig)->prev != NULL)
2249 p_sig = &(*p_sig)->prev;
2250
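/* Synthesize a wait status for the dequeued signal, as if it
   had just been reported by waitpid.  */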
2251 *wstat = W_STOPCODE ((*p_sig)->signal);
2252 if ((*p_sig)->info.si_signo != 0)
2253 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2254 &(*p_sig)->info);
2255 free (*p_sig);
2256 *p_sig = NULL;
2257
2258 if (debug_threads)
2259 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2260 WSTOPSIG (*wstat), lwpid_of (thread));
2261
2262 if (debug_threads)
2263 {
2264 struct pending_signals *sig;
2265
2266 for (sig = lwp->pending_signals_to_report;
2267 sig != NULL;
2268 sig = sig->prev)
2269 debug_printf (" Still queued %d\n",
2270 sig->signal);
2271
2272 debug_printf (" (no more queued signals)\n");
2273 }
2274
2275 return 1;
2276 }
2277
2278 return 0;
2279 }
2280
2281 /* Fetch the possibly triggered data watchpoint info and store it in
2282 CHILD.
2283
2284 On some archs, like x86, that use debug registers to set
2285 watchpoints, it's possible that the way to know which watched
2286 address trapped is to check the register that is used to select
2287 which address to watch. Problem is, between setting the watchpoint
2288 and reading back which data address trapped, the user may change
2289 the set of watchpoints, and, as a consequence, GDB changes the
2290 debug registers in the inferior. To avoid reading back a stale
2291 stopped-data-address when that happens, we cache in LP the fact
2292 that a watchpoint trapped, and the corresponding data address, as
2293 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2294 registers meanwhile, we have the cached data we can rely on. */
2295
2296 static int
2297 check_stopped_by_watchpoint (struct lwp_info *child)
2298 {
2299 if (the_low_target.stopped_by_watchpoint != NULL)
2300 {
2301 struct thread_info *saved_thread;
2302
2303 saved_thread = current_thread;
2304 current_thread = get_lwp_thread (child);
2305
2306 if (the_low_target.stopped_by_watchpoint ())
2307 {
2308 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2309
2310 if (the_low_target.stopped_data_address != NULL)
2311 child->stopped_data_address
2312 = the_low_target.stopped_data_address ();
2313 else
2314 child->stopped_data_address = 0;
2315 }
2316
2317 current_thread = saved_thread;
2318 }
2319
2320 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2321 }
2322
2323 /* Return the ptrace options that we want to try to enable. */
2324
2325 static int
2326 linux_low_ptrace_options (int attached)
2327 {
2328 client_state &cs = get_client_state ();
2329 int options = 0;
2330
2331 if (!attached)
2332 options |= PTRACE_O_EXITKILL;
2333
2334 if (cs.report_fork_events)
2335 options |= PTRACE_O_TRACEFORK;
2336
2337 if (cs.report_vfork_events)
2338 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2339
2340 if (cs.report_exec_events)
2341 options |= PTRACE_O_TRACEEXEC;
2342
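/* Always ask for syscall stops.  With PTRACE_O_TRACESYSGOOD the
   kernel reports them with WSTOPSIG == (SIGTRAP | 0x80), so they
   can't be confused with breakpoint traps.  */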
2343 options |= PTRACE_O_TRACESYSGOOD;
2344
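/* The caller hands these to the kernel via PTRACE_SETOPTIONS
   (through linux_enable_event_reporting) once the LWP has
   reported its first stop.  */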
2345 return options;
2346 }
2347
2348 /* Do low-level handling of the event, and check if we should go on
2349 and pass it to caller code. Return the affected lwp if so, or
2350 NULL otherwise. */
2351
2352 static struct lwp_info *
2353 linux_low_filter_event (int lwpid, int wstat)
2354 {
2355 client_state &cs = get_client_state ();
2356 struct lwp_info *child;
2357 struct thread_info *thread;
2358 int have_stop_pc = 0;
2359
2360 child = find_lwp_pid (ptid_t (lwpid));
2361
2362 /* Check for stop events reported by a process we didn't already
2363 know about - anything not already in our LWP list.
2364
2365 If we're expecting to receive stopped processes after
2366 fork, vfork, and clone events, then we'll just add the
2367 new one to our list and go back to waiting for the event
2368 to be reported - the stopped process might be returned
2369 from waitpid before or after the event is.
2370
2371 But note the case of a non-leader thread exec'ing after the
2372 leader having exited, and gone from our lists (because
2373 check_zombie_leaders deleted it). The non-leader thread
2374 changes its tid to the tgid. */
2375
2376 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2377 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2378 {
2379 ptid_t child_ptid;
2380
2381 /* A multi-thread exec after we had seen the leader exiting. */
2382 if (debug_threads)
2383 {
2384 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2385 "after exec.\n", lwpid);
2386 }
2387
2388 child_ptid = ptid_t (lwpid, lwpid, 0);
2389 child = add_lwp (child_ptid);
2390 child->stopped = 1;
2391 current_thread = child->thread;
2392 }
2393
2394 /* If we didn't find a process, one of two things presumably happened:
2395 - A process we started and then detached from has exited. Ignore it.
2396 - A process we are controlling has forked and the new child's stop
2397 was reported to us by the kernel. Save its PID. */
2398 if (child == NULL && WIFSTOPPED (wstat))
2399 {
2400 add_to_pid_list (&stopped_pids, lwpid, wstat);
2401 return NULL;
2402 }
2403 else if (child == NULL)
2404 return NULL;
2405
2406 thread = get_lwp_thread (child);
2407
2408 child->stopped = 1;
2409
2410 child->last_status = wstat;
2411
2412 /* Check if the thread has exited. */
2413 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2414 {
2415 if (debug_threads)
2416 debug_printf ("LLFE: %d exited.\n", lwpid);
2417
2418 if (finish_step_over (child))
2419 {
2420 /* Unsuspend all other LWPs, and set them back running again. */
2421 unsuspend_all_lwps (child);
2422 }
2423
2424 /* If there is at least one more LWP, then the exit signal was
2425 not the end of the debugged application and should be
2426 ignored, unless GDB wants to hear about thread exits. */
2427 if (cs.report_thread_events
2428 || last_thread_of_process_p (pid_of (thread)))
2429 {
2430 /* Events are serialized to the GDB core, and we can't
2431 report this one right now. Leave the status pending for
2432 the next time we're able to report it. */
2433 mark_lwp_dead (child, wstat);
2434 return child;
2435 }
2436 else
2437 {
2438 delete_lwp (child);
2439 return NULL;
2440 }
2441 }
2442
2443 gdb_assert (WIFSTOPPED (wstat));
2444
2445 if (WIFSTOPPED (wstat))
2446 {
2447 struct process_info *proc;
2448
2449 /* Architecture-specific setup after inferior is running. */
2450 proc = find_process_pid (pid_of (thread));
2451 if (proc->tdesc == NULL)
2452 {
2453 if (proc->attached)
2454 {
2455 /* This needs to happen after we have attached to the
2456 inferior and it is stopped for the first time, but
2457 before we access any inferior registers. */
2458 linux_arch_setup_thread (thread);
2459 }
2460 else
2461 {
2462 /* The process is started, but GDBserver will do
2463 architecture-specific setup after the program stops at
2464 the first instruction. */
2465 child->status_pending_p = 1;
2466 child->status_pending = wstat;
2467 return child;
2468 }
2469 }
2470 }
2471
2472 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2473 {
2474 struct process_info *proc = find_process_pid (pid_of (thread));
2475 int options = linux_low_ptrace_options (proc->attached);
2476
2477 linux_enable_event_reporting (lwpid, options);
2478 child->must_set_ptrace_flags = 0;
2479 }
2480
2481 /* Always update syscall_state, even if it will be filtered later. */
2482 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2483 {
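/* Syscall stops come in entry/return pairs, so each new stop
   flips the recorded state.  */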
2484 child->syscall_state
2485 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2486 ? TARGET_WAITKIND_SYSCALL_RETURN
2487 : TARGET_WAITKIND_SYSCALL_ENTRY);
2488 }
2489 else
2490 {
2491 /* Almost all other ptrace-stops are known to be outside of system
2492 calls, with further exceptions in handle_extended_wait. */
2493 child->syscall_state = TARGET_WAITKIND_IGNORE;
2494 }
2495
2496 /* Be careful to not overwrite stop_pc until save_stop_reason is
2497 called. */
2498 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2499 && linux_is_extended_waitstatus (wstat))
2500 {
2501 child->stop_pc = get_pc (child);
2502 if (handle_extended_wait (&child, wstat))
2503 {
2504 /* The event has been handled, so just return without
2505 reporting it. */
2506 return NULL;
2507 }
2508 }
2509
2510 if (linux_wstatus_maybe_breakpoint (wstat))
2511 {
2512 if (save_stop_reason (child))
2513 have_stop_pc = 1;
2514 }
2515
2516 if (!have_stop_pc)
2517 child->stop_pc = get_pc (child);
2518
2519 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2520 && child->stop_expected)
2521 {
2522 if (debug_threads)
2523 debug_printf ("Expected stop.\n");
2524 child->stop_expected = 0;
2525
2526 if (thread->last_resume_kind == resume_stop)
2527 {
2528 /* We want to report the stop to the core. Treat the
2529 SIGSTOP as a normal event. */
2530 if (debug_threads)
2531 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2532 target_pid_to_str (ptid_of (thread)));
2533 }
2534 else if (stopping_threads != NOT_STOPPING_THREADS)
2535 {
2536 /* Stopping threads. We don't want this SIGSTOP to end up
2537 pending. */
2538 if (debug_threads)
2539 debug_printf ("LLW: SIGSTOP caught for %s "
2540 "while stopping threads.\n",
2541 target_pid_to_str (ptid_of (thread)));
2542 return NULL;
2543 }
2544 else
2545 {
2546 /* This is a delayed SIGSTOP. Filter out the event. */
2547 if (debug_threads)
2548 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2549 child->stepping ? "step" : "continue",
2550 target_pid_to_str (ptid_of (thread)));
2551
2552 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2553 return NULL;
2554 }
2555 }
2556
2557 child->status_pending_p = 1;
2558 child->status_pending = wstat;
2559 return child;
2560 }
2561
2562 /* Return true if THREAD is doing hardware single step. */
2563
2564 static int
2565 maybe_hw_step (struct thread_info *thread)
2566 {
2567 if (can_hardware_single_step ())
2568 return 1;
2569 else
2570 {
2571 /* GDBserver must insert single-step breakpoint for software
2572 single step. */
2573 gdb_assert (has_single_step_breakpoints (thread));
2574 return 0;
2575 }
2576 }
2577
2578 /* Resume LWPs that are currently stopped without any pending status
2579 to report, but are resumed from the core's perspective. */
2580
2581 static void
2582 resume_stopped_resumed_lwps (thread_info *thread)
2583 {
2584 struct lwp_info *lp = get_thread_lwp (thread);
2585
2586 if (lp->stopped
2587 && !lp->suspended
2588 && !lp->status_pending_p
2589 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2590 {
2591 int step = 0;
2592
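/* If the core wanted this thread single-stepping, keep the step
   request alive across the re-resume (hardware step if
   available).  */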
2593 if (thread->last_resume_kind == resume_step)
2594 step = maybe_hw_step (thread);
2595
2596 if (debug_threads)
2597 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2598 target_pid_to_str (ptid_of (thread)),
2599 paddress (lp->stop_pc),
2600 step);
2601
2602 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2603 }
2604 }
2605
2606 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2607 match FILTER_PTID (leaving others pending). The PTIDs can be:
2608 minus_one_ptid, to specify any child; a pid PTID, specifying all
2609 lwps of a thread group; or a PTID representing a single lwp. Store
2610 the stop status through the status pointer WSTAT. OPTIONS is
2611 passed to the waitpid call. Return 0 if no event was found and
2612 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2613 were found. Return the PID of the stopped child otherwise. */
2614
2615 static int
2616 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2617 int *wstatp, int options)
2618 {
2619 struct thread_info *event_thread;
2620 struct lwp_info *event_child, *requested_child;
2621 sigset_t block_mask, prev_mask;
2622
2623 retry:
2624 /* N.B. event_thread points to the thread_info struct that contains
2625 event_child. Keep them in sync. */
2626 event_thread = NULL;
2627 event_child = NULL;
2628 requested_child = NULL;
2629
2630 /* Check for a lwp with a pending status. */
2631
2632 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2633 {
2634 event_thread = find_thread_in_random ([&] (thread_info *thread)
2635 {
2636 return status_pending_p_callback (thread, filter_ptid);
2637 });
2638
2639 if (event_thread != NULL)
2640 event_child = get_thread_lwp (event_thread);
2641 if (debug_threads && event_thread)
2642 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2643 }
2644 else if (filter_ptid != null_ptid)
2645 {
2646 requested_child = find_lwp_pid (filter_ptid);
2647
2648 if (stopping_threads == NOT_STOPPING_THREADS
2649 && requested_child->status_pending_p
2650 && (requested_child->collecting_fast_tracepoint
2651 != fast_tpoint_collect_result::not_collecting))
2652 {
2653 enqueue_one_deferred_signal (requested_child,
2654 &requested_child->status_pending);
2655 requested_child->status_pending_p = 0;
2656 requested_child->status_pending = 0;
2657 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2658 }
2659
2660 if (requested_child->suspended
2661 && requested_child->status_pending_p)
2662 {
2663 internal_error (__FILE__, __LINE__,
2664 "requesting an event out of a"
2665 " suspended child?");
2666 }
2667
2668 if (requested_child->status_pending_p)
2669 {
2670 event_child = requested_child;
2671 event_thread = get_lwp_thread (event_child);
2672 }
2673 }
2674
2675 if (event_child != NULL)
2676 {
2677 if (debug_threads)
2678 debug_printf ("Got an event from pending child %ld (%04x)\n",
2679 lwpid_of (event_thread), event_child->status_pending);
2680 *wstatp = event_child->status_pending;
2681 event_child->status_pending_p = 0;
2682 event_child->status_pending = 0;
2683 current_thread = event_thread;
2684 return lwpid_of (event_thread);
2685 }
2686
2687 /* But if we don't find a pending event, we'll have to wait.
2688
2689 We only enter this loop if no process has a pending wait status.
2690 Thus any action taken in response to a wait status inside this
2691 loop is responding as soon as we detect the status, not after any
2692 pending events. */
2693
2694 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2695 all signals while here. */
2696 sigfillset (&block_mask);
2697 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2698
2699 /* Always pull all events out of the kernel. We'll randomly select
2700 an event LWP out of all that have events, to prevent
2701 starvation. */
2702 while (event_child == NULL)
2703 {
2704 pid_t ret = 0;
2705
2706 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2707 quirks:
2708
2709 - If the thread group leader exits while other threads in the
2710 thread group still exist, waitpid(TGID, ...) hangs. That
2711 waitpid won't return an exit status until the other threads
2712 in the group are reaped.
2713
2714 - When a non-leader thread execs, that thread just vanishes
2715 without reporting an exit (so we'd hang if we waited for it
2716 explicitly in that case). The exec event is reported to
2717 the TGID pid. */
2718 errno = 0;
2719 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2720
2721 if (debug_threads)
2722 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2723 ret, errno ? strerror (errno) : "ERRNO-OK");
2724
2725 if (ret > 0)
2726 {
2727 if (debug_threads)
2728 {
2729 debug_printf ("LLW: waitpid %ld received %s\n",
2730 (long) ret, status_to_str (*wstatp));
2731 }
2732
2733 /* Filter all events. IOW, leave all events pending. We'll
2734 randomly select an event LWP out of all that have events
2735 below. */
2736 linux_low_filter_event (ret, *wstatp);
2737 /* Retry until nothing comes out of waitpid. A single
2738 SIGCHLD can indicate more than one child stopped. */
2739 continue;
2740 }
2741
2742 /* Now that we've pulled all events out of the kernel, resume
2743 LWPs that don't have an interesting event to report. */
2744 if (stopping_threads == NOT_STOPPING_THREADS)
2745 for_each_thread (resume_stopped_resumed_lwps);
2746
2747 /* ... and find an LWP with a status to report to the core, if
2748 any. */
2749 event_thread = find_thread_in_random ([&] (thread_info *thread)
2750 {
2751 return status_pending_p_callback (thread, filter_ptid);
2752 });
2753
2754 if (event_thread != NULL)
2755 {
2756 event_child = get_thread_lwp (event_thread);
2757 *wstatp = event_child->status_pending;
2758 event_child->status_pending_p = 0;
2759 event_child->status_pending = 0;
2760 break;
2761 }
2762
2763 /* Check for zombie thread group leaders. Those can't be reaped
2764 until all other threads in the thread group are. */
2765 check_zombie_leaders ();
2766
2767 auto not_stopped = [&] (thread_info *thread)
2768 {
2769 return not_stopped_callback (thread, wait_ptid);
2770 };
2771
2772 /* If there are no resumed children left in the set of LWPs we
2773 want to wait for, bail. We can't just block in
2774 waitpid/sigsuspend, because lwps might have been left stopped
2775 in trace-stop state, and we'd be stuck forever waiting for
2776 their status to change (which would only happen if we resumed
2777 them). Even if WNOHANG is set, this return code is preferred
2778 over 0 (below), as it is more detailed. */
2779 if (find_thread (not_stopped) == NULL)
2780 {
2781 if (debug_threads)
2782 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2783 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2784 return -1;
2785 }
2786
2787 /* No interesting event to report to the caller. */
2788 if ((options & WNOHANG))
2789 {
2790 if (debug_threads)
2791 debug_printf ("WNOHANG set, no event found\n");
2792
2793 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2794 return 0;
2795 }
2796
2797 /* Block until we get an event reported with SIGCHLD. */
2798 if (debug_threads)
2799 debug_printf ("sigsuspend'ing\n");
2800
2801 sigsuspend (&prev_mask);
2802 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2803 goto retry;
2804 }
2805
2806 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2807
2808 current_thread = event_thread;
2809
2810 return lwpid_of (event_thread);
2811 }
2812
2813 /* Wait for an event from child(ren) PTID. PTIDs can be:
2814 minus_one_ptid, to specify any child; a pid PTID, specifying all
2815 lwps of a thread group; or a PTID representing a single lwp. Store
2816 the stop status through the status pointer WSTAT. OPTIONS is
2817 passed to the waitpid call. Return 0 if no event was found and
2818 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2819 were found. Return the PID of the stopped child otherwise. */
2820
2821 static int
2822 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2823 {
2824 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2825 }
2826
2827 /* Select one LWP out of those that have events pending. */
2828
2829 static void
2830 select_event_lwp (struct lwp_info **orig_lp)
2831 {
2832 int random_selector;
2833 struct thread_info *event_thread = NULL;
2834
2835 /* In all-stop, give preference to the LWP that is being
2836 single-stepped. There will be at most one, and it's the LWP that
2837 the core is most interested in. If we didn't do this, then we'd
2838 have to handle pending step SIGTRAPs somehow in case the core
2839 later continues the previously-stepped thread, otherwise we'd
2840 report the pending SIGTRAP, and the core, not having stepped the
2841 thread, wouldn't understand what the trap was for, and therefore
2842 would report it to the user as a random signal. */
2843 if (!non_stop)
2844 {
2845 event_thread = find_thread ([] (thread_info *thread)
2846 {
2847 lwp_info *lp = get_thread_lwp (thread);
2848
2849 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2850 && thread->last_resume_kind == resume_step
2851 && lp->status_pending_p);
2852 });
2853
2854 if (event_thread != NULL)
2855 {
2856 if (debug_threads)
2857 debug_printf ("SEL: Select single-step %s\n",
2858 target_pid_to_str (ptid_of (event_thread)));
2859 }
2860 }
2861 if (event_thread == NULL)
2862 {
2863 /* No single-stepping LWP. Select one at random, out of those
2864 which have had events. */
2865
2866 /* First see how many events we have. */
2867 int num_events = 0;
2868 for_each_thread ([&] (thread_info *thread)
2869 {
2870 lwp_info *lp = get_thread_lwp (thread);
2871
2872 /* Count only resumed LWPs that have an event pending. */
2873 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2874 && lp->status_pending_p)
2875 num_events++;
2876 });
2877 gdb_assert (num_events > 0);
2878
2879 /* Now randomly pick a LWP out of those that have had
2880 events. */
2881 random_selector = (int)
2882 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2883
2884 if (debug_threads && num_events > 1)
2885 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2886 num_events, random_selector);
2887
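/* Walk the eligible LWPs, counting down the selector; the one
   that reaches zero is our pick.  */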
2888 event_thread = find_thread ([&] (thread_info *thread)
2889 {
2890 lwp_info *lp = get_thread_lwp (thread);
2891
2892 /* Select only resumed LWPs that have an event pending. */
2893 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2894 && lp->status_pending_p)
2895 if (random_selector-- == 0)
2896 return true;
2897
2898 return false;
2899 });
2900 }
2901
2902 if (event_thread != NULL)
2903 {
2904 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2905
2906 /* Switch the event LWP. */
2907 *orig_lp = event_lp;
2908 }
2909 }
2910
2911 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2912 non-NULL. */
2913
2914 static void
2915 unsuspend_all_lwps (struct lwp_info *except)
2916 {
2917 for_each_thread ([&] (thread_info *thread)
2918 {
2919 lwp_info *lwp = get_thread_lwp (thread);
2920
2921 if (lwp != except)
2922 lwp_suspended_decr (lwp);
2923 });
2924 }
2925
2926 static void move_out_of_jump_pad_callback (thread_info *thread);
2927 static bool stuck_in_jump_pad_callback (thread_info *thread);
2928 static bool lwp_running (thread_info *thread);
2929 static ptid_t linux_wait_1 (ptid_t ptid,
2930 struct target_waitstatus *ourstatus,
2931 int target_options);
2932
2933 /* Stabilize threads (move out of jump pads).
2934
2935 If a thread is midway through collecting a fast tracepoint, we need to
2936 finish the collection and move it out of the jump pad before
2937 reporting the signal.
2938
2939 This avoids recursion while collecting (when a signal arrives
2940 midway, and the signal handler itself collects), which would trash
2941 the trace buffer. In case the user set a breakpoint in a signal
2942 handler, this avoids the backtrace showing the jump pad, etc.
2943 Most importantly, there are certain things we can't do safely if
2944 threads are stopped in a jump pad (or in its callee's). For
2945 example:
2946
2947 - starting a new trace run. A thread still collecting the
2948 previous run could trash the trace buffer when resumed. The trace
2949 buffer control structures would have been reset but the thread had
2950 no way to tell. The thread could even be midway through memcpy'ing
2951 to the buffer, which would mean that when resumed, it would clobber
2952 the trace buffer that had been set up for a new run.
2953
2954 - we can't rewrite/reuse the jump pads for new tracepoints
2955 safely. Say you do tstart while a thread is stopped midway through
2956 collecting. When the thread is later resumed, it finishes the
2957 collection, and returns to the jump pad, to execute the original
2958 instruction that was under the tracepoint jump at the time the
2959 older run had been started. If the jump pad had been rewritten
2960 since for something else in the new run, the thread would now
2961 execute the wrong / random instructions. */
2962
2963 static void
2964 linux_stabilize_threads (void)
2965 {
2966 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2967
2968 if (thread_stuck != NULL)
2969 {
2970 if (debug_threads)
2971 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2972 lwpid_of (thread_stuck));
2973 return;
2974 }
2975
2976 thread_info *saved_thread = current_thread;
2977
2978 stabilizing_threads = 1;
2979
2980 /* Kick 'em all. */
2981 for_each_thread (move_out_of_jump_pad_callback);
2982
2983 /* Loop until all are stopped out of the jump pads. */
2984 while (find_thread (lwp_running) != NULL)
2985 {
2986 struct target_waitstatus ourstatus;
2987 struct lwp_info *lwp;
2988 int wstat;
2989
2990 /* Note that we go through the full wait event loop. While
2991 moving threads out of jump pad, we need to be able to step
2992 over internal breakpoints and such. */
2993 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2994
2995 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2996 {
2997 lwp = get_thread_lwp (current_thread);
2998
2999 /* Lock it. */
3000 lwp_suspended_inc (lwp);
3001
3002 if (ourstatus.value.sig != GDB_SIGNAL_0
3003 || current_thread->last_resume_kind == resume_stop)
3004 {
3005 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3006 enqueue_one_deferred_signal (lwp, &wstat);
3007 }
3008 }
3009 }
3010
3011 unsuspend_all_lwps (NULL);
3012
3013 stabilizing_threads = 0;
3014
3015 current_thread = saved_thread;
3016
3017 if (debug_threads)
3018 {
3019 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3020
3021 if (thread_stuck != NULL)
3022 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3023 lwpid_of (thread_stuck));
3024 }
3025 }
3026
3027 /* Convenience function that is called when the kernel reports an
3028 event that is not passed out to GDB. */
3029
3030 static ptid_t
3031 ignore_event (struct target_waitstatus *ourstatus)
3032 {
3033 /* If we got an event, there may still be others, as a single
3034 SIGCHLD can indicate more than one child stopped. This forces
3035 another target_wait call. */
3036 async_file_mark ();
3037
3038 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3039 return null_ptid;
3040 }
3041
3042 /* Convenience function that is called when the kernel reports an exit
3043 event. This decides whether to report the event to GDB as a
3044 process exit event, a thread exit event, or to suppress the
3045 event. */
3046
3047 static ptid_t
3048 filter_exit_event (struct lwp_info *event_child,
3049 struct target_waitstatus *ourstatus)
3050 {
3051 client_state &cs = get_client_state ();
3052 struct thread_info *thread = get_lwp_thread (event_child);
3053 ptid_t ptid = ptid_of (thread);
3054
3055 if (!last_thread_of_process_p (pid_of (thread)))
3056 {
3057 if (cs.report_thread_events)
3058 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3059 else
3060 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3061
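/* Not the last thread, so the LWP itself can be discarded
   whether or not the exit is reported.  */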
3062 delete_lwp (event_child);
3063 }
3064 return ptid;
3065 }
3066
3067 /* Returns 1 if GDB is interested in any event_child syscalls. */
3068
3069 static int
3070 gdb_catching_syscalls_p (struct lwp_info *event_child)
3071 {
3072 struct thread_info *thread = get_lwp_thread (event_child);
3073 struct process_info *proc = get_thread_process (thread);
3074
3075 return !proc->syscalls_to_catch.empty ();
3076 }
3077
3078 /* Returns 1 if GDB is interested in the event_child syscall.
3079 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3080
3081 static int
3082 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3083 {
3084 int sysno;
3085 struct thread_info *thread = get_lwp_thread (event_child);
3086 struct process_info *proc = get_thread_process (thread);
3087
3088 if (proc->syscalls_to_catch.empty ())
3089 return 0;
3090
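/* A single ANY_SYSCALL entry means "catch all syscalls".  */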
3091 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3092 return 1;
3093
3094 get_syscall_trapinfo (event_child, &sysno);
3095
3096 for (int iter : proc->syscalls_to_catch)
3097 if (iter == sysno)
3098 return 1;
3099
3100 return 0;
3101 }
3102
3103 /* Wait for process, returns status. */
3104
3105 static ptid_t
3106 linux_wait_1 (ptid_t ptid,
3107 struct target_waitstatus *ourstatus, int target_options)
3108 {
3109 client_state &cs = get_client_state ();
3110 int w;
3111 struct lwp_info *event_child;
3112 int options;
3113 int pid;
3114 int step_over_finished;
3115 int bp_explains_trap;
3116 int maybe_internal_trap;
3117 int report_to_gdb;
3118 int trace_event;
3119 int in_step_range;
3120 int any_resumed;
3121
3122 if (debug_threads)
3123 {
3124 debug_enter ();
3125 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3126 }
3127
3128 /* Translate generic target options into linux options. */
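/* __WALL: wait for all children, both "clone" children (threads)
   and ordinary ones, regardless of how they report exit.  */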
3129 options = __WALL;
3130 if (target_options & TARGET_WNOHANG)
3131 options |= WNOHANG;
3132
3133 bp_explains_trap = 0;
3134 trace_event = 0;
3135 in_step_range = 0;
3136 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3137
3138 auto status_pending_p_any = [&] (thread_info *thread)
3139 {
3140 return status_pending_p_callback (thread, minus_one_ptid);
3141 };
3142
3143 auto not_stopped = [&] (thread_info *thread)
3144 {
3145 return not_stopped_callback (thread, minus_one_ptid);
3146 };
3147
3148 /* Find a resumed LWP, if any. */
3149 if (find_thread (status_pending_p_any) != NULL)
3150 any_resumed = 1;
3151 else if (find_thread (not_stopped) != NULL)
3152 any_resumed = 1;
3153 else
3154 any_resumed = 0;
3155
3156 if (step_over_bkpt == null_ptid)
3157 pid = linux_wait_for_event (ptid, &w, options);
3158 else
3159 {
3160 if (debug_threads)
3161 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3162 target_pid_to_str (step_over_bkpt));
3163 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3164 }
3165
3166 if (pid == 0 || (pid == -1 && !any_resumed))
3167 {
3168 gdb_assert (target_options & TARGET_WNOHANG);
3169
3170 if (debug_threads)
3171 {
3172 debug_printf ("linux_wait_1 ret = null_ptid, "
3173 "TARGET_WAITKIND_IGNORE\n");
3174 debug_exit ();
3175 }
3176
3177 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3178 return null_ptid;
3179 }
3180 else if (pid == -1)
3181 {
3182 if (debug_threads)
3183 {
3184 debug_printf ("linux_wait_1 ret = null_ptid, "
3185 "TARGET_WAITKIND_NO_RESUMED\n");
3186 debug_exit ();
3187 }
3188
3189 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3190 return null_ptid;
3191 }
3192
3193 event_child = get_thread_lwp (current_thread);
3194
3195 /* linux_wait_for_event only returns an exit status for the last
3196 child of a process. Report it. */
3197 if (WIFEXITED (w) || WIFSIGNALED (w))
3198 {
3199 if (WIFEXITED (w))
3200 {
3201 ourstatus->kind = TARGET_WAITKIND_EXITED;
3202 ourstatus->value.integer = WEXITSTATUS (w);
3203
3204 if (debug_threads)
3205 {
3206 debug_printf ("linux_wait_1 ret = %s, exited with "
3207 "retcode %d\n",
3208 target_pid_to_str (ptid_of (current_thread)),
3209 WEXITSTATUS (w));
3210 debug_exit ();
3211 }
3212 }
3213 else
3214 {
3215 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3216 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3217
3218 if (debug_threads)
3219 {
3220 debug_printf ("linux_wait_1 ret = %s, terminated with "
3221 "signal %d\n",
3222 target_pid_to_str (ptid_of (current_thread)),
3223 WTERMSIG (w));
3224 debug_exit ();
3225 }
3226 }
3227
3228 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3229 return filter_exit_event (event_child, ourstatus);
3230
3231 return ptid_of (current_thread);
3232 }
3233
3234 /* If a step-over executes a breakpoint instruction, then in the case
3235 of a hardware single step it means a gdb/gdbserver breakpoint had
3236 been planted on top of a permanent breakpoint, while in the case of a
3237 software single step it may just mean that gdbserver hit the reinsert
3238 breakpoint. The PC has been adjusted by save_stop_reason to point at
3239 the breakpoint address.
3240 So in the hardware single step case, advance the PC manually past the
3241 breakpoint, and in the software single step case, advance only if it
3242 is not the single_step_breakpoint we are hitting.
3243 This prevents the program from trapping on a permanent breakpoint
3244 forever. */
3245 if (step_over_bkpt != null_ptid
3246 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3247 && (event_child->stepping
3248 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3249 {
3250 int increment_pc = 0;
3251 int breakpoint_kind = 0;
3252 CORE_ADDR stop_pc = event_child->stop_pc;
3253
3254 breakpoint_kind =
3255 the_target->breakpoint_kind_from_current_state (&stop_pc);
3256 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3257
3258 if (debug_threads)
3259 {
3260 debug_printf ("step-over for %s executed software breakpoint\n",
3261 target_pid_to_str (ptid_of (current_thread)));
3262 }
3263
3264 if (increment_pc != 0)
3265 {
3266 struct regcache *regcache
3267 = get_thread_regcache (current_thread, 1);
3268
3269 event_child->stop_pc += increment_pc;
3270 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3271
3272 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3273 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3274 }
3275 }
3276
3277 /* If this event was not handled before, and is not a SIGTRAP, we
3278 report it. SIGILL and SIGSEGV are also treated as traps in case
3279 a breakpoint is inserted at the current PC. If this target does
3280 not support internal breakpoints at all, we also report the
3281 SIGTRAP without further processing; it's of no concern to us. */
3282 maybe_internal_trap
3283 = (supports_breakpoints ()
3284 && (WSTOPSIG (w) == SIGTRAP
3285 || ((WSTOPSIG (w) == SIGILL
3286 || WSTOPSIG (w) == SIGSEGV)
3287 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3288
3289 if (maybe_internal_trap)
3290 {
3291 /* Handle anything that requires bookkeeping before deciding to
3292 report the event or continue waiting. */
3293
3294 /* First check if we can explain the SIGTRAP with an internal
3295 breakpoint, or if we should possibly report the event to GDB.
3296 Do this before anything that may remove or insert a
3297 breakpoint. */
3298 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3299
3300 /* We have a SIGTRAP, possibly a step-over dance has just
3301 finished. If so, tweak the state machine accordingly,
3302 reinsert breakpoints and delete any single-step
3303 breakpoints. */
3304 step_over_finished = finish_step_over (event_child);
3305
3306 /* Now invoke the callbacks of any internal breakpoints there. */
3307 check_breakpoints (event_child->stop_pc);
3308
3309 /* Handle tracepoint data collecting. This may overflow the
3310 trace buffer, and cause a tracing stop, removing
3311 breakpoints. */
3312 trace_event = handle_tracepoints (event_child);
3313
3314 if (bp_explains_trap)
3315 {
3316 if (debug_threads)
3317 debug_printf ("Hit a gdbserver breakpoint.\n");
3318 }
3319 }
3320 else
3321 {
3322 /* We have some other signal, possibly a step-over dance was in
3323 progress, and it should be cancelled too. */
3324 step_over_finished = finish_step_over (event_child);
3325 }
3326
3327 /* We have all the data we need. Either report the event to GDB, or
3328 resume threads and keep waiting for more. */
3329
3330 /* If we're collecting a fast tracepoint, finish the collection and
3331 move out of the jump pad before delivering a signal. See
3332 linux_stabilize_threads. */
3333
3334 if (WIFSTOPPED (w)
3335 && WSTOPSIG (w) != SIGTRAP
3336 && supports_fast_tracepoints ()
3337 && agent_loaded_p ())
3338 {
3339 if (debug_threads)
3340 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3341 "to defer or adjust it.\n",
3342 WSTOPSIG (w), lwpid_of (current_thread));
3343
3344 /* Allow debugging the jump pad itself. */
3345 if (current_thread->last_resume_kind != resume_step
3346 && maybe_move_out_of_jump_pad (event_child, &w))
3347 {
3348 enqueue_one_deferred_signal (event_child, &w);
3349
3350 if (debug_threads)
3351 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3352 WSTOPSIG (w), lwpid_of (current_thread));
3353
3354 linux_resume_one_lwp (event_child, 0, 0, NULL);
3355
3356 if (debug_threads)
3357 debug_exit ();
3358 return ignore_event (ourstatus);
3359 }
3360 }
3361
3362 if (event_child->collecting_fast_tracepoint
3363 != fast_tpoint_collect_result::not_collecting)
3364 {
3365 if (debug_threads)
3366 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3367 "Check if we're already there.\n",
3368 lwpid_of (current_thread),
3369 (int) event_child->collecting_fast_tracepoint);
3370
3371 trace_event = 1;
3372
3373 event_child->collecting_fast_tracepoint
3374 = linux_fast_tracepoint_collecting (event_child, NULL);
3375
3376 if (event_child->collecting_fast_tracepoint
3377 != fast_tpoint_collect_result::before_insn)
3378 {
3379 /* No longer need this breakpoint. */
3380 if (event_child->exit_jump_pad_bkpt != NULL)
3381 {
3382 if (debug_threads)
3383 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3384 "stopping all threads momentarily.\n");
3385
3386 /* Other running threads could hit this breakpoint.
3387 We don't handle moribund locations like GDB does,
3388 instead we always pause all threads when removing
3389 breakpoints, so that any step-over or
3390 decr_pc_after_break adjustment is always taken
3391 care of while the breakpoint is still
3392 inserted. */
3393 stop_all_lwps (1, event_child);
3394
3395 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3396 event_child->exit_jump_pad_bkpt = NULL;
3397
3398 unstop_all_lwps (1, event_child);
3399
3400 gdb_assert (event_child->suspended >= 0);
3401 }
3402 }
3403
3404 if (event_child->collecting_fast_tracepoint
3405 == fast_tpoint_collect_result::not_collecting)
3406 {
3407 if (debug_threads)
3408 debug_printf ("fast tracepoint finished "
3409 "collecting successfully.\n");
3410
3411 /* We may have a deferred signal to report. */
3412 if (dequeue_one_deferred_signal (event_child, &w))
3413 {
3414 if (debug_threads)
3415 debug_printf ("dequeued one signal.\n");
3416 }
3417 else
3418 {
3419 if (debug_threads)
3420 debug_printf ("no deferred signals.\n");
3421
3422 if (stabilizing_threads)
3423 {
3424 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3425 ourstatus->value.sig = GDB_SIGNAL_0;
3426
3427 if (debug_threads)
3428 {
3429 debug_printf ("linux_wait_1 ret = %s, stopped "
3430 "while stabilizing threads\n",
3431 target_pid_to_str (ptid_of (current_thread)));
3432 debug_exit ();
3433 }
3434
3435 return ptid_of (current_thread);
3436 }
3437 }
3438 }
3439 }
3440
3441 /* Check whether GDB would be interested in this event. */
3442
3443 /* Check if GDB is interested in this syscall. */
3444 if (WIFSTOPPED (w)
3445 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3446 && !gdb_catch_this_syscall_p (event_child))
3447 {
3448 if (debug_threads)
3449 {
3450 debug_printf ("Ignored syscall for LWP %ld.\n",
3451 lwpid_of (current_thread));
3452 }
3453
3454 linux_resume_one_lwp (event_child, event_child->stepping,
3455 0, NULL);
3456
3457 if (debug_threads)
3458 debug_exit ();
3459 return ignore_event (ourstatus);
3460 }
3461
3462 /* If GDB is not interested in this signal, don't stop other
3463 threads, and don't report it to GDB. Just resume the inferior
3464 right away. We do this for threading-related signals as well as
3465 any that GDB specifically requested we ignore. But never ignore
3466 SIGSTOP if we sent it ourselves, and do not ignore signals when
3467 stepping - they may require special handling to skip the signal
3468 handler. Also never ignore signals that could be caused by a
3469 breakpoint. */
3470 if (WIFSTOPPED (w)
3471 && current_thread->last_resume_kind != resume_step
3472 && (
3473 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3474 (current_process ()->priv->thread_db != NULL
3475 && (WSTOPSIG (w) == __SIGRTMIN
3476 || WSTOPSIG (w) == __SIGRTMIN + 1))
3477 ||
3478 #endif
3479 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3480 && !(WSTOPSIG (w) == SIGSTOP
3481 && current_thread->last_resume_kind == resume_stop)
3482 && !linux_wstatus_maybe_breakpoint (w))))
3483 {
3484 siginfo_t info, *info_p;
3485
3486 if (debug_threads)
3487 debug_printf ("Ignored signal %d for LWP %ld.\n",
3488 WSTOPSIG (w), lwpid_of (current_thread));
3489
3490 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3491 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3492 info_p = &info;
3493 else
3494 info_p = NULL;
3495
3496 if (step_over_finished)
3497 {
3498 /* We cancelled this thread's step-over above. We still
3499 need to unsuspend all other LWPs, and set them back
3500 running again while the signal handler runs. */
3501 unsuspend_all_lwps (event_child);
3502
3503 /* Enqueue the pending signal info so that proceed_all_lwps
3504 doesn't lose it. */
3505 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3506
3507 proceed_all_lwps ();
3508 }
3509 else
3510 {
3511 linux_resume_one_lwp (event_child, event_child->stepping,
3512 WSTOPSIG (w), info_p);
3513 }
3514
3515 if (debug_threads)
3516 debug_exit ();
3517
3518 return ignore_event (ourstatus);
3519 }
3520
3521 /* Note that all addresses are always "out of the step range" when
3522 there's no range to begin with. */
3523 in_step_range = lwp_in_step_range (event_child);
3524
3525 /* If GDB wanted this thread to single step, and the thread is out
3526 of the step range, we always want to report the SIGTRAP, and let
3527 GDB handle it. Watchpoints should always be reported. So should
3528 signals we can't explain. A SIGTRAP we can't explain could be a
3529 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3530 do, we'd be able to handle GDB breakpoints on top of internal
3531 breakpoints, by handling the internal breakpoint and still
3532 reporting the event to GDB. If we don't, we're out of luck, GDB
3533 won't see the breakpoint hit. If we see a single-step event but
3534 the thread should be continuing, don't pass the trap to gdb.
3535 That indicates that we had previously finished a single-step but
3536 left the single-step pending -- see
3537 complete_ongoing_step_over. */
3538 report_to_gdb = (!maybe_internal_trap
3539 || (current_thread->last_resume_kind == resume_step
3540 && !in_step_range)
3541 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3542 || (!in_step_range
3543 && !bp_explains_trap
3544 && !trace_event
3545 && !step_over_finished
3546 && !(current_thread->last_resume_kind == resume_continue
3547 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3548 || (gdb_breakpoint_here (event_child->stop_pc)
3549 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3550 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3551 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3552
3553 run_breakpoint_commands (event_child->stop_pc);
3554
3555 /* We found no reason GDB would want us to stop. We either hit one
3556 of our own breakpoints, or finished an internal step GDB
3557 shouldn't know about. */
3558 if (!report_to_gdb)
3559 {
3560 if (debug_threads)
3561 {
3562 if (bp_explains_trap)
3563 debug_printf ("Hit a gdbserver breakpoint.\n");
3564 if (step_over_finished)
3565 debug_printf ("Step-over finished.\n");
3566 if (trace_event)
3567 debug_printf ("Tracepoint event.\n");
3568 if (lwp_in_step_range (event_child))
3569 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3570 paddress (event_child->stop_pc),
3571 paddress (event_child->step_range_start),
3572 paddress (event_child->step_range_end));
3573 }
3574
3575 /* We're not reporting this breakpoint to GDB, so apply the
3576 decr_pc_after_break adjustment to the inferior's regcache
3577 ourselves. */
3578
3579 if (the_low_target.set_pc != NULL)
3580 {
3581 struct regcache *regcache
3582 = get_thread_regcache (current_thread, 1);
3583 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3584 }
3585
3586 if (step_over_finished)
3587 {
3588 /* If we have finished stepping over a breakpoint, we've
3589 stopped and suspended all LWPs momentarily except the
3590 stepping one. This is where we resume them all again.
3591 We're going to keep waiting, so use proceed, which
3592 handles stepping over the next breakpoint. */
3593 unsuspend_all_lwps (event_child);
3594 }
3595 else
3596 {
3597 /* Remove the single-step breakpoints if any. Note that
3598 there isn't a single-step breakpoint if we finished stepping
3599 over. */
3600 if (can_software_single_step ()
3601 && has_single_step_breakpoints (current_thread))
3602 {
3603 stop_all_lwps (0, event_child);
3604 delete_single_step_breakpoints (current_thread);
3605 unstop_all_lwps (0, event_child);
3606 }
3607 }
3608
3609 if (debug_threads)
3610 debug_printf ("proceeding all threads.\n");
3611 proceed_all_lwps ();
3612
3613 if (debug_threads)
3614 debug_exit ();
3615
3616 return ignore_event (ourstatus);
3617 }
3618
3619 if (debug_threads)
3620 {
3621 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3622 {
3623 std::string str
3624 = target_waitstatus_to_string (&event_child->waitstatus);
3625
3626 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3627 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3628 }
3629 if (current_thread->last_resume_kind == resume_step)
3630 {
3631 if (event_child->step_range_start == event_child->step_range_end)
3632 debug_printf ("GDB wanted to single-step, reporting event.\n");
3633 else if (!lwp_in_step_range (event_child))
3634 debug_printf ("Out of step range, reporting event.\n");
3635 }
3636 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3637 debug_printf ("Stopped by watchpoint.\n");
3638 else if (gdb_breakpoint_here (event_child->stop_pc))
3639 debug_printf ("Stopped by GDB breakpoint.\n");
3640 debug_printf ("Hit a non-gdbserver trap event.\n");
3642 }
3643
3644 /* Alright, we're going to report a stop. */
3645
3646 /* Remove single-step breakpoints. */
3647 if (can_software_single_step ())
3648 {
3649	  /* Decide whether to remove the single-step breakpoints.  If we do,
3650	     stop all lwps first, so that other threads won't hit a breakpoint
3651	     left in stale memory. */
3652 int remove_single_step_breakpoints_p = 0;
3653
3654 if (non_stop)
3655 {
3656 remove_single_step_breakpoints_p
3657 = has_single_step_breakpoints (current_thread);
3658 }
3659 else
3660 {
3661 /* In all-stop, a stop reply cancels all previous resume
3662 requests. Delete all single-step breakpoints. */
3663
3664 find_thread ([&] (thread_info *thread) {
3665 if (has_single_step_breakpoints (thread))
3666 {
3667 remove_single_step_breakpoints_p = 1;
3668 return true;
3669 }
3670
3671 return false;
3672 });
3673 }
3674
3675 if (remove_single_step_breakpoints_p)
3676 {
3677	  /* If we remove single-step breakpoints from memory, stop all lwps,
3678	     so that other threads won't hit a breakpoint left in stale
3679	     memory. */
3680 stop_all_lwps (0, event_child);
3681
3682 if (non_stop)
3683 {
3684 gdb_assert (has_single_step_breakpoints (current_thread));
3685 delete_single_step_breakpoints (current_thread);
3686 }
3687 else
3688 {
3689 for_each_thread ([] (thread_info *thread){
3690 if (has_single_step_breakpoints (thread))
3691 delete_single_step_breakpoints (thread);
3692 });
3693 }
3694
3695 unstop_all_lwps (0, event_child);
3696 }
3697 }
3698
3699 if (!stabilizing_threads)
3700 {
3701 /* In all-stop, stop all threads. */
3702 if (!non_stop)
3703 stop_all_lwps (0, NULL);
3704
3705 if (step_over_finished)
3706 {
3707 if (!non_stop)
3708 {
3709 /* If we were doing a step-over, all other threads but
3710 the stepping one had been paused in start_step_over,
3711 with their suspend counts incremented. We don't want
3712 to do a full unstop/unpause, because we're in
3713 all-stop mode (so we want threads stopped), but we
3714 still need to unsuspend the other threads, to
3715 decrement their `suspended' count back. */
3716 unsuspend_all_lwps (event_child);
3717 }
3718 else
3719 {
3720 /* If we just finished a step-over, then all threads had
3721 been momentarily paused. In all-stop, that's fine,
3722 we want threads stopped by now anyway. In non-stop,
3723 we need to re-resume threads that GDB wanted to be
3724 running. */
3725 unstop_all_lwps (1, event_child);
3726 }
3727 }
3728
3729 /* If we're not waiting for a specific LWP, choose an event LWP
3730 from among those that have had events. Giving equal priority
3731 to all LWPs that have had events helps prevent
3732 starvation. */
3733 if (ptid == minus_one_ptid)
3734 {
3735 event_child->status_pending_p = 1;
3736 event_child->status_pending = w;
3737
3738 select_event_lwp (&event_child);
3739
3740 /* current_thread and event_child must stay in sync. */
3741 current_thread = get_lwp_thread (event_child);
3742
3743 event_child->status_pending_p = 0;
3744 w = event_child->status_pending;
3745 }
3746
3747
3748 /* Stabilize threads (move out of jump pads). */
3749 if (!non_stop)
3750 stabilize_threads ();
3751 }
3752 else
3753 {
3754 /* If we just finished a step-over, then all threads had been
3755 momentarily paused. In all-stop, that's fine, we want
3756 threads stopped by now anyway. In non-stop, we need to
3757 re-resume threads that GDB wanted to be running. */
3758 if (step_over_finished)
3759 unstop_all_lwps (1, event_child);
3760 }
3761
3762 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3763 {
3764 /* If the reported event is an exit, fork, vfork or exec, let
3765 GDB know. */
3766
3767 /* Break the unreported fork relationship chain. */
3768 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3769 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3770 {
3771 event_child->fork_relative->fork_relative = NULL;
3772 event_child->fork_relative = NULL;
3773 }
3774
3775 *ourstatus = event_child->waitstatus;
3776 /* Clear the event lwp's waitstatus since we handled it already. */
3777 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3778 }
3779 else
3780 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3781
3782 /* Now that we've selected our final event LWP, un-adjust its PC if
3783 it was a software breakpoint, and the client doesn't know we can
3784 adjust the breakpoint ourselves. */
3785 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3786 && !cs.swbreak_feature)
3787 {
3788 int decr_pc = the_low_target.decr_pc_after_break;
3789
3790 if (decr_pc != 0)
3791 {
3792 struct regcache *regcache
3793 = get_thread_regcache (current_thread, 1);
3794 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3795 }
3796 }
3797
3798 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3799 {
3800 get_syscall_trapinfo (event_child,
3801 &ourstatus->value.syscall_number);
3802 ourstatus->kind = event_child->syscall_state;
3803 }
3804 else if (current_thread->last_resume_kind == resume_stop
3805 && WSTOPSIG (w) == SIGSTOP)
3806 {
3807	      /* A thread that GDB requested to stop with vCont;t stopped
3808		 cleanly, so report it as SIG0.  The use of SIGSTOP is an
3809		 implementation detail. */
3810 ourstatus->value.sig = GDB_SIGNAL_0;
3811 }
3812 else if (current_thread->last_resume_kind == resume_stop
3813 && WSTOPSIG (w) != SIGSTOP)
3814 {
3815	      /* A thread that GDB requested to stop with vCont;t, but which
3816		 stopped for some other reason. */
3817 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3818 }
3819 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3820 {
3821 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3822 }
3823
3824 gdb_assert (step_over_bkpt == null_ptid);
3825
3826 if (debug_threads)
3827 {
3828 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3829 target_pid_to_str (ptid_of (current_thread)),
3830 ourstatus->kind, ourstatus->value.sig);
3831 debug_exit ();
3832 }
3833
3834 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3835 return filter_exit_event (event_child, ourstatus);
3836
3837 return ptid_of (current_thread);
3838 }
3839
3840 /* Get rid of any pending event in the pipe. */
3841 static void
3842 async_file_flush (void)
3843 {
3844 int ret;
3845 char buf;
3846
3847 do
3848 ret = read (linux_event_pipe[0], &buf, 1);
3849 while (ret >= 0 || (ret == -1 && errno == EINTR));
3850 }
3851
3852 /* Put something in the pipe, so the event loop wakes up. */
3853 static void
3854 async_file_mark (void)
3855 {
3856 int ret;
3857
3858 async_file_flush ();
3859
3860 do
3861 ret = write (linux_event_pipe[1], "+", 1);
3862 while (ret == 0 || (ret == -1 && errno == EINTR));
3863
3864 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3865 be awakened anyway. */
3866 }
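/* Together, async_file_flush and async_file_mark implement the classic
   self-pipe trick: poke the write end to wake a select/poll-based event
   loop, and drain the read end before blocking again.  A minimal
   standalone sketch of the same pattern (illustration only; the names
   below are hypothetical, and gdbserver's own pipe is created elsewhere
   in this file with both ends set non-blocking):  */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int demo_pipe[2];

static void
demo_pipe_init (void)
{
  pipe (demo_pipe);
  fcntl (demo_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (demo_pipe[1], F_SETFL, O_NONBLOCK);
}

static void
demo_wait_for_mark (void)
{
  struct pollfd pfd = { demo_pipe[0], POLLIN, 0 };
  char buf;

  /* Sleep until some other context does write (demo_pipe[1], "+", 1).  */
  poll (&pfd, 1, -1);

  /* Drain the pipe, mirroring async_file_flush above.  */
  while (read (demo_pipe[0], &buf, 1) > 0)
    continue;
}
#endif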
3867
3868 static ptid_t
3869 linux_wait (ptid_t ptid,
3870 struct target_waitstatus *ourstatus, int target_options)
3871 {
3872 ptid_t event_ptid;
3873
3874 /* Flush the async file first. */
3875 if (target_is_async_p ())
3876 async_file_flush ();
3877
3878 do
3879 {
3880 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3881 }
3882 while ((target_options & TARGET_WNOHANG) == 0
3883 && event_ptid == null_ptid
3884 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3885
3886 /* If at least one stop was reported, there may be more. A single
3887 SIGCHLD can signal more than one child stop. */
3888 if (target_is_async_p ()
3889 && (target_options & TARGET_WNOHANG) != 0
3890 && event_ptid != null_ptid)
3891 async_file_mark ();
3892
3893 return event_ptid;
3894 }
3895
3896 /* Send a signal to an LWP. */
3897
3898 static int
3899 kill_lwp (unsigned long lwpid, int signo)
3900 {
3901 int ret;
3902
3903 errno = 0;
3904 ret = syscall (__NR_tkill, lwpid, signo);
3905 if (errno == ENOSYS)
3906 {
3907 /* If tkill fails, then we are not using nptl threads, a
3908 configuration we no longer support. */
3909 perror_with_name (("tkill"));
3910 }
3911 return ret;
3912 }
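/* tkill directs the signal at a single thread (LWP), unlike kill,
   which signals the whole thread group.  The newer tgkill syscall
   (Linux 2.5.75 and later) additionally guards against LWP id reuse by
   qualifying the target with its thread-group id; a sketch, not used
   by this file:  */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int
kill_lwp_tgkill (pid_t tgid, pid_t tid, int signo)
{
  /* Fails with ESRCH if TID no longer belongs to TGID, instead of
     signalling an unrelated, recycled thread id.  */
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif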
3913
3914 void
3915 linux_stop_lwp (struct lwp_info *lwp)
3916 {
3917 send_sigstop (lwp);
3918 }
3919
3920 static void
3921 send_sigstop (struct lwp_info *lwp)
3922 {
3923 int pid;
3924
3925 pid = lwpid_of (get_lwp_thread (lwp));
3926
3927 /* If we already have a pending stop signal for this process, don't
3928 send another. */
3929 if (lwp->stop_expected)
3930 {
3931 if (debug_threads)
3932 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3933
3934 return;
3935 }
3936
3937 if (debug_threads)
3938 debug_printf ("Sending sigstop to lwp %d\n", pid);
3939
3940 lwp->stop_expected = 1;
3941 kill_lwp (pid, SIGSTOP);
3942 }
3943
3944 static void
3945 send_sigstop (thread_info *thread, lwp_info *except)
3946 {
3947 struct lwp_info *lwp = get_thread_lwp (thread);
3948
3949 /* Ignore EXCEPT. */
3950 if (lwp == except)
3951 return;
3952
3953 if (lwp->stopped)
3954 return;
3955
3956 send_sigstop (lwp);
3957 }
3958
3959 /* Increment the suspend count of an LWP, and stop it, if not stopped
3960 yet. */
3961 static void
3962 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3963 {
3964 struct lwp_info *lwp = get_thread_lwp (thread);
3965
3966 /* Ignore EXCEPT. */
3967 if (lwp == except)
3968 return;
3969
3970 lwp_suspended_inc (lwp);
3971
3972 send_sigstop (thread, except);
3973 }
3974
3975 static void
3976 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3977 {
3978 /* Store the exit status for later. */
3979 lwp->status_pending_p = 1;
3980 lwp->status_pending = wstat;
3981
3982 /* Store in waitstatus as well, as there's nothing else to process
3983 for this event. */
3984 if (WIFEXITED (wstat))
3985 {
3986 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3987 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3988 }
3989 else if (WIFSIGNALED (wstat))
3990 {
3991 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3992 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3993 }
3994
3995 /* Prevent trying to stop it. */
3996 lwp->stopped = 1;
3997
3998 /* No further stops are expected from a dead lwp. */
3999 lwp->stop_expected = 0;
4000 }
4001
4002 /* Return true if LWP has exited already, and has a pending exit event
4003 to report to GDB. */
4004
4005 static int
4006 lwp_is_marked_dead (struct lwp_info *lwp)
4007 {
4008 return (lwp->status_pending_p
4009 && (WIFEXITED (lwp->status_pending)
4010 || WIFSIGNALED (lwp->status_pending)));
4011 }
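/* For reference, the wait status tested above packs several cases;
   decoding one looks like this (a generic sketch of the <sys/wait.h>
   macros, not gdbserver-specific code):  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
describe_wait_status (int w)
{
  if (WIFEXITED (w))
    printf ("exited, status %d\n", WEXITSTATUS (w));
  else if (WIFSIGNALED (w))
    printf ("killed by signal %d\n", WTERMSIG (w));
  else if (WIFSTOPPED (w))
    printf ("stopped by signal %d\n", WSTOPSIG (w));
}
#endif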
4012
4013 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4014
4015 static void
4016 wait_for_sigstop (void)
4017 {
4018 struct thread_info *saved_thread;
4019 ptid_t saved_tid;
4020 int wstat;
4021 int ret;
4022
4023 saved_thread = current_thread;
4024 if (saved_thread != NULL)
4025 saved_tid = saved_thread->id;
4026 else
4027 saved_tid = null_ptid; /* avoid bogus unused warning */
4028
4029 if (debug_threads)
4030 debug_printf ("wait_for_sigstop: pulling events\n");
4031
4032 /* Passing NULL_PTID as filter indicates we want all events to be
4033 left pending. Eventually this returns when there are no
4034 unwaited-for children left. */
4035 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4036 &wstat, __WALL);
4037 gdb_assert (ret == -1);
4038
4039 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4040 current_thread = saved_thread;
4041 else
4042 {
4043 if (debug_threads)
4044 debug_printf ("Previously current thread died.\n");
4045
4046 /* We can't change the current inferior behind GDB's back,
4047 otherwise, a subsequent command may apply to the wrong
4048 process. */
4049 current_thread = NULL;
4050 }
4051 }
4052
4053 /* Returns true if THREAD is stopped in a jump pad, and we can't
4054 move it out, because we need to report the stop event to GDB. For
4055 example, if the user puts a breakpoint in the jump pad, it's
4056 because she wants to debug it. */
4057
4058 static bool
4059 stuck_in_jump_pad_callback (thread_info *thread)
4060 {
4061 struct lwp_info *lwp = get_thread_lwp (thread);
4062
4063 if (lwp->suspended != 0)
4064 {
4065 internal_error (__FILE__, __LINE__,
4066 "LWP %ld is suspended, suspended=%d\n",
4067 lwpid_of (thread), lwp->suspended);
4068 }
4069 gdb_assert (lwp->stopped);
4070
4071	  /* Allow debugging the jump pad, gdb_collect, etc.  */
4072 return (supports_fast_tracepoints ()
4073 && agent_loaded_p ()
4074 && (gdb_breakpoint_here (lwp->stop_pc)
4075 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4076 || thread->last_resume_kind == resume_step)
4077 && (linux_fast_tracepoint_collecting (lwp, NULL)
4078 != fast_tpoint_collect_result::not_collecting));
4079 }
4080
4081 static void
4082 move_out_of_jump_pad_callback (thread_info *thread)
4083 {
4084 struct thread_info *saved_thread;
4085 struct lwp_info *lwp = get_thread_lwp (thread);
4086 int *wstat;
4087
4088 if (lwp->suspended != 0)
4089 {
4090 internal_error (__FILE__, __LINE__,
4091 "LWP %ld is suspended, suspended=%d\n",
4092 lwpid_of (thread), lwp->suspended);
4093 }
4094 gdb_assert (lwp->stopped);
4095
4096 /* For gdb_breakpoint_here. */
4097 saved_thread = current_thread;
4098 current_thread = thread;
4099
4100 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4101
4102 /* Allow debugging the jump pad, gdb_collect, etc. */
4103 if (!gdb_breakpoint_here (lwp->stop_pc)
4104 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4105 && thread->last_resume_kind != resume_step
4106 && maybe_move_out_of_jump_pad (lwp, wstat))
4107 {
4108 if (debug_threads)
4109 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4110 lwpid_of (thread));
4111
4112 if (wstat)
4113 {
4114 lwp->status_pending_p = 0;
4115 enqueue_one_deferred_signal (lwp, wstat);
4116
4117 if (debug_threads)
4118 debug_printf ("Signal %d for LWP %ld deferred "
4119 "(in jump pad)\n",
4120 WSTOPSIG (*wstat), lwpid_of (thread));
4121 }
4122
4123 linux_resume_one_lwp (lwp, 0, 0, NULL);
4124 }
4125 else
4126 lwp_suspended_inc (lwp);
4127
4128 current_thread = saved_thread;
4129 }
4130
4131 static bool
4132 lwp_running (thread_info *thread)
4133 {
4134 struct lwp_info *lwp = get_thread_lwp (thread);
4135
4136 if (lwp_is_marked_dead (lwp))
4137 return false;
4138
4139 return !lwp->stopped;
4140 }
4141
4142 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4143 If SUSPEND, then also increase the suspend count of every LWP,
4144 except EXCEPT. */
4145
4146 static void
4147 stop_all_lwps (int suspend, struct lwp_info *except)
4148 {
4149 /* Should not be called recursively. */
4150 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4151
4152 if (debug_threads)
4153 {
4154 debug_enter ();
4155 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4156 suspend ? "stop-and-suspend" : "stop",
4157 except != NULL
4158 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4159 : "none");
4160 }
4161
4162 stopping_threads = (suspend
4163 ? STOPPING_AND_SUSPENDING_THREADS
4164 : STOPPING_THREADS);
4165
4166 if (suspend)
4167 for_each_thread ([&] (thread_info *thread)
4168 {
4169 suspend_and_send_sigstop (thread, except);
4170 });
4171 else
4172 for_each_thread ([&] (thread_info *thread)
4173 {
4174 send_sigstop (thread, except);
4175 });
4176
4177 wait_for_sigstop ();
4178 stopping_threads = NOT_STOPPING_THREADS;
4179
4180 if (debug_threads)
4181 {
4182 debug_printf ("stop_all_lwps done, setting stopping_threads "
4183 "back to !stopping\n");
4184 debug_exit ();
4185 }
4186 }
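/* Callers typically use stop_all_lwps/unstop_all_lwps as a bracket
   around work that must not race with running threads, e.g. the
   single-step breakpoint removal earlier in this file:

     stop_all_lwps (0, event_child);
     delete_single_step_breakpoints (current_thread);
     unstop_all_lwps (0, event_child);

   The suspending variant, stop_all_lwps (1, lwp), is instead paired
   with unsuspend_all_lwps, as the step-over machinery below does.  */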
4187
4188 /* Enqueue one signal in the chain of signals which need to be
4189 delivered to this process on next resume. */
4190
4191 static void
4192 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4193 {
4194 struct pending_signals *p_sig = XNEW (struct pending_signals);
4195
4196 p_sig->prev = lwp->pending_signals;
4197 p_sig->signal = signal;
4198 if (info == NULL)
4199 memset (&p_sig->info, 0, sizeof (siginfo_t));
4200 else
4201 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4202 lwp->pending_signals = p_sig;
4203 }
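/* The pending_signals chain is pushed LIFO through the PREV pointers,
   so consumers that want FIFO delivery walk to the tail first, as
   linux_resume_one_lwp_throw does below.  That walk in isolation
   (illustration only):  */
#if 0
static struct pending_signals *
oldest_pending_signal (struct lwp_info *lwp)
{
  struct pending_signals *p = lwp->pending_signals;

  while (p != NULL && p->prev != NULL)
    p = p->prev;	/* The tail of the chain is the oldest entry.  */

  return p;
}
#endif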
4204
4205 /* Install breakpoints for software single stepping. */
4206
4207 static void
4208 install_software_single_step_breakpoints (struct lwp_info *lwp)
4209 {
4210 struct thread_info *thread = get_lwp_thread (lwp);
4211 struct regcache *regcache = get_thread_regcache (thread, 1);
4212
4213 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4214
4215 current_thread = thread;
4216 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4217
4218 for (CORE_ADDR pc : next_pcs)
4219 set_single_step_breakpoint (pc, current_ptid);
4220 }
4221
4222 /* Single-step via hardware or software single-step.
4223    Return 1 if hardware single-stepping, 0 if software single-stepping
4224    or if the target can't single-step at all. */
4225
4226 static int
4227 single_step (struct lwp_info* lwp)
4228 {
4229 int step = 0;
4230
4231 if (can_hardware_single_step ())
4232 {
4233 step = 1;
4234 }
4235 else if (can_software_single_step ())
4236 {
4237 install_software_single_step_breakpoints (lwp);
4238 step = 0;
4239 }
4240 else
4241 {
4242 if (debug_threads)
4243	 debug_printf ("stepping is not implemented on this target\n");
4244 }
4245
4246 return step;
4247 }
4248
4249 /* A signal can be delivered to the inferior if we are not trying to
4250    finish a fast tracepoint collect.  Since a signal can be delivered
4251    during a step-over, the program may enter the signal handler and
4252    trap again after returning from it.  We can live with the spurious
4253    double traps. */
4254
4255 static int
4256 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4257 {
4258 return (lwp->collecting_fast_tracepoint
4259 == fast_tpoint_collect_result::not_collecting);
4260 }
4261
4262 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4263 SIGNAL is nonzero, give it that signal. */
4264
4265 static void
4266 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4267 int step, int signal, siginfo_t *info)
4268 {
4269 struct thread_info *thread = get_lwp_thread (lwp);
4270 struct thread_info *saved_thread;
4271 int ptrace_request;
4272 struct process_info *proc = get_thread_process (thread);
4273
4274 /* Note that target description may not be initialised
4275 (proc->tdesc == NULL) at this point because the program hasn't
4276 stopped at the first instruction yet. It means GDBserver skips
4277 the extra traps from the wrapper program (see option --wrapper).
4278 Code in this function that requires register access should be
4279 guarded by proc->tdesc == NULL or something else. */
4280
4281 if (lwp->stopped == 0)
4282 return;
4283
4284 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4285
4286 fast_tpoint_collect_result fast_tp_collecting
4287 = lwp->collecting_fast_tracepoint;
4288
4289 gdb_assert (!stabilizing_threads
4290 || (fast_tp_collecting
4291 != fast_tpoint_collect_result::not_collecting));
4292
4293 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4294 user used the "jump" command, or "set $pc = foo"). */
4295 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4296 {
4297 /* Collecting 'while-stepping' actions doesn't make sense
4298 anymore. */
4299 release_while_stepping_state_list (thread);
4300 }
4301
4302 /* If we have pending signals or status, and a new signal, enqueue the
4303 signal. Also enqueue the signal if it can't be delivered to the
4304 inferior right now. */
4305 if (signal != 0
4306 && (lwp->status_pending_p
4307 || lwp->pending_signals != NULL
4308 || !lwp_signal_can_be_delivered (lwp)))
4309 {
4310 enqueue_pending_signal (lwp, signal, info);
4311
4312 /* Postpone any pending signal. It was enqueued above. */
4313 signal = 0;
4314 }
4315
4316 if (lwp->status_pending_p)
4317 {
4318 if (debug_threads)
4319 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4320 " has pending status\n",
4321 lwpid_of (thread), step ? "step" : "continue",
4322 lwp->stop_expected ? "expected" : "not expected");
4323 return;
4324 }
4325
4326 saved_thread = current_thread;
4327 current_thread = thread;
4328
4329 /* This bit needs some thinking about. If we get a signal that
4330 we must report while a single-step reinsert is still pending,
4331 we often end up resuming the thread. It might be better to
4332 (ew) allow a stack of pending events; then we could be sure that
4333 the reinsert happened right away and not lose any signals.
4334
4335 Making this stack would also shrink the window in which breakpoints are
4336 uninserted (see comment in linux_wait_for_lwp) but not enough for
4337 complete correctness, so it won't solve that problem. It may be
4338 worthwhile just to solve this one, however. */
4339 if (lwp->bp_reinsert != 0)
4340 {
4341 if (debug_threads)
4342 debug_printf (" pending reinsert at 0x%s\n",
4343 paddress (lwp->bp_reinsert));
4344
4345 if (can_hardware_single_step ())
4346 {
4347 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4348 {
4349 if (step == 0)
4350 warning ("BAD - reinserting but not stepping.");
4351 if (lwp->suspended)
4352 warning ("BAD - reinserting and suspended(%d).",
4353 lwp->suspended);
4354 }
4355 }
4356
4357 step = maybe_hw_step (thread);
4358 }
4359
4360 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4361 {
4362 if (debug_threads)
4363 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4364 " (exit-jump-pad-bkpt)\n",
4365 lwpid_of (thread));
4366 }
4367 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4368 {
4369 if (debug_threads)
4370 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4371 " single-stepping\n",
4372 lwpid_of (thread));
4373
4374 if (can_hardware_single_step ())
4375 step = 1;
4376 else
4377 {
4378 internal_error (__FILE__, __LINE__,
4379 "moving out of jump pad single-stepping"
4380 " not implemented on this target");
4381 }
4382 }
4383
4384 /* If we have while-stepping actions in this thread, set it stepping.
4385 If we have a signal to deliver, it may or may not be set to
4386 SIG_IGN, we don't know. Assume so, and allow collecting
4387 while-stepping into a signal handler. A possible smart thing to
4388 do would be to set an internal breakpoint at the signal return
4389 address, continue, and carry on catching this while-stepping
4390 action only when that breakpoint is hit. A future
4391 enhancement. */
4392 if (thread->while_stepping != NULL)
4393 {
4394 if (debug_threads)
4395 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4396 lwpid_of (thread));
4397
4398 step = single_step (lwp);
4399 }
4400
4401 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4402 {
4403 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4404
4405 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4406
4407 if (debug_threads)
4408 {
4409 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4410 (long) lwp->stop_pc);
4411 }
4412 }
4413
4414 /* If we have pending signals, consume one if it can be delivered to
4415 the inferior. */
4416 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4417 {
4418 struct pending_signals **p_sig;
4419
4420 p_sig = &lwp->pending_signals;
4421 while ((*p_sig)->prev != NULL)
4422 p_sig = &(*p_sig)->prev;
4423
4424 signal = (*p_sig)->signal;
4425 if ((*p_sig)->info.si_signo != 0)
4426 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4427 &(*p_sig)->info);
4428
4429 free (*p_sig);
4430 *p_sig = NULL;
4431 }
4432
4433 if (debug_threads)
4434 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4435 lwpid_of (thread), step ? "step" : "continue", signal,
4436 lwp->stop_expected ? "expected" : "not expected");
4437
4438 if (the_low_target.prepare_to_resume != NULL)
4439 the_low_target.prepare_to_resume (lwp);
4440
4441 regcache_invalidate_thread (thread);
4442 errno = 0;
4443 lwp->stepping = step;
4444 if (step)
4445 ptrace_request = PTRACE_SINGLESTEP;
4446 else if (gdb_catching_syscalls_p (lwp))
4447 ptrace_request = PTRACE_SYSCALL;
4448 else
4449 ptrace_request = PTRACE_CONT;
4450 ptrace (ptrace_request,
4451 lwpid_of (thread),
4452 (PTRACE_TYPE_ARG3) 0,
4453 /* Coerce to a uintptr_t first to avoid potential gcc warning
4454 of coercing an 8 byte integer to a 4 byte pointer. */
4455 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4456
4457 current_thread = saved_thread;
4458 if (errno)
4459 perror_with_name ("resuming thread");
4460
4461 /* Successfully resumed. Clear state that no longer makes sense,
4462 and mark the LWP as running. Must not do this before resuming
4463 otherwise if that fails other code will be confused. E.g., we'd
4464 later try to stop the LWP and hang forever waiting for a stop
4465 status. Note that we must not throw after this is cleared,
4466 otherwise handle_zombie_lwp_error would get confused. */
4467 lwp->stopped = 0;
4468 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4469 }
4470
4471 /* Called when we try to resume a stopped LWP and that errors out. If
4472    the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4473    or about to become one), discard the error, clear any pending status
4474 the LWP may have, and return true (we'll collect the exit status
4475 soon enough). Otherwise, return false. */
4476
4477 static int
4478 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4479 {
4480 struct thread_info *thread = get_lwp_thread (lp);
4481
4482 /* If we get an error after resuming the LWP successfully, we'd
4483 confuse !T state for the LWP being gone. */
4484 gdb_assert (lp->stopped);
4485
4486 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4487 because even if ptrace failed with ESRCH, the tracee may be "not
4488 yet fully dead", but already refusing ptrace requests. In that
4489 case the tracee has 'R (Running)' state for a little bit
4490 (observed in Linux 3.18). See also the note on ESRCH in the
4491 ptrace(2) man page. Instead, check whether the LWP has any state
4492 other than ptrace-stopped. */
4493
4494 /* Don't assume anything if /proc/PID/status can't be read. */
4495 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4496 {
4497 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4498 lp->status_pending_p = 0;
4499 return 1;
4500 }
4501 return 0;
4502 }
4503
4504 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4505 disappears while we try to resume it. */
4506
4507 static void
4508 linux_resume_one_lwp (struct lwp_info *lwp,
4509 int step, int signal, siginfo_t *info)
4510 {
4511 TRY
4512 {
4513 linux_resume_one_lwp_throw (lwp, step, signal, info);
4514 }
4515 CATCH (ex, RETURN_MASK_ERROR)
4516 {
4517 if (!check_ptrace_stopped_lwp_gone (lwp))
4518 throw_exception (ex);
4519 }
4520 END_CATCH
4521 }
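/* TRY/CATCH/END_CATCH are GDB's portable exception-handling macros
   (see common/common-exceptions.h); later sources express this body as
   a plain C++ try/catch on gdb_exception_error.  The pattern here --
   attempt the resume, and swallow the error only if the LWP turns out
   to be gone -- recurs wherever a ptrace request can race with thread
   exit.  */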
4522
4523 /* This function is called once per thread via for_each_thread.
4524 We look up which resume request applies to THREAD and mark it with a
4525 pointer to the appropriate resume request.
4526
4527 This algorithm is O(threads * resume elements), but resume elements
4528 is small (and will remain small at least until GDB supports thread
4529 suspension). */
4530
4531 static void
4532 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4533 {
4534 struct lwp_info *lwp = get_thread_lwp (thread);
4535
4536 for (int ndx = 0; ndx < n; ndx++)
4537 {
4538 ptid_t ptid = resume[ndx].thread;
4539 if (ptid == minus_one_ptid
4540 || ptid == thread->id
4541 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4542 of PID'. */
4543 || (ptid.pid () == pid_of (thread)
4544 && (ptid.is_pid ()
4545 || ptid.lwp () == -1)))
4546 {
4547 if (resume[ndx].kind == resume_stop
4548 && thread->last_resume_kind == resume_stop)
4549 {
4550 if (debug_threads)
4551 debug_printf ("already %s LWP %ld at GDB's request\n",
4552 (thread->last_status.kind
4553 == TARGET_WAITKIND_STOPPED)
4554 ? "stopped"
4555 : "stopping",
4556 lwpid_of (thread));
4557
4558 continue;
4559 }
4560
4561 /* Ignore (wildcard) resume requests for already-resumed
4562 threads. */
4563 if (resume[ndx].kind != resume_stop
4564 && thread->last_resume_kind != resume_stop)
4565 {
4566 if (debug_threads)
4567 debug_printf ("already %s LWP %ld at GDB's request\n",
4568 (thread->last_resume_kind
4569 == resume_step)
4570 ? "stepping"
4571 : "continuing",
4572 lwpid_of (thread));
4573 continue;
4574 }
4575
4576 /* Don't let wildcard resumes resume fork children that GDB
4577 does not yet know are new fork children. */
4578 if (lwp->fork_relative != NULL)
4579 {
4580 struct lwp_info *rel = lwp->fork_relative;
4581
4582 if (rel->status_pending_p
4583 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4584 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4585 {
4586 if (debug_threads)
4587 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4588 lwpid_of (thread));
4589 continue;
4590 }
4591 }
4592
4593 /* If the thread has a pending event that has already been
4594 reported to GDBserver core, but GDB has not pulled the
4595 event out of the vStopped queue yet, likewise, ignore the
4596 (wildcard) resume request. */
4597 if (in_queued_stop_replies (thread->id))
4598 {
4599 if (debug_threads)
4600 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4601 lwpid_of (thread));
4602 continue;
4603 }
4604
4605 lwp->resume = &resume[ndx];
4606 thread->last_resume_kind = lwp->resume->kind;
4607
4608 lwp->step_range_start = lwp->resume->step_range_start;
4609 lwp->step_range_end = lwp->resume->step_range_end;
4610
4611 /* If we had a deferred signal to report, dequeue one now.
4612 This can happen if LWP gets more than one signal while
4613 trying to get out of a jump pad. */
4614 if (lwp->stopped
4615 && !lwp->status_pending_p
4616 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4617 {
4618 lwp->status_pending_p = 1;
4619
4620 if (debug_threads)
4621 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4622 "leaving status pending.\n",
4623 WSTOPSIG (lwp->status_pending),
4624 lwpid_of (thread));
4625 }
4626
4627 return;
4628 }
4629 }
4630
4631 /* No resume action for this thread. */
4632 lwp->resume = NULL;
4633 }
4634
4635 /* find_thread callback for linux_resume. Return true if this lwp has an
4636 interesting status pending. */
4637
4638 static bool
4639 resume_status_pending_p (thread_info *thread)
4640 {
4641 struct lwp_info *lwp = get_thread_lwp (thread);
4642
4643 /* LWPs which will not be resumed are not interesting, because
4644 we might not wait for them next time through linux_wait. */
4645 if (lwp->resume == NULL)
4646 return false;
4647
4648 return thread_still_has_status_pending_p (thread);
4649 }
4650
4651 /* Return true if this lwp that GDB wants running is stopped at an
4652 internal breakpoint that we need to step over. It assumes that any
4653 required STOP_PC adjustment has already been propagated to the
4654 inferior's regcache. */
4655
4656 static bool
4657 need_step_over_p (thread_info *thread)
4658 {
4659 struct lwp_info *lwp = get_thread_lwp (thread);
4660 struct thread_info *saved_thread;
4661 CORE_ADDR pc;
4662 struct process_info *proc = get_thread_process (thread);
4663
4664 /* GDBserver is skipping the extra traps from the wrapper program,
4665 don't have to do step over. */
4666 if (proc->tdesc == NULL)
4667 return false;
4668
4669 /* LWPs which will not be resumed are not interesting, because we
4670 might not wait for them next time through linux_wait. */
4671
4672 if (!lwp->stopped)
4673 {
4674 if (debug_threads)
4675 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4676 lwpid_of (thread));
4677 return false;
4678 }
4679
4680 if (thread->last_resume_kind == resume_stop)
4681 {
4682 if (debug_threads)
4683 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4684 " stopped\n",
4685 lwpid_of (thread));
4686 return false;
4687 }
4688
4689 gdb_assert (lwp->suspended >= 0);
4690
4691 if (lwp->suspended)
4692 {
4693 if (debug_threads)
4694 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4695 lwpid_of (thread));
4696 return false;
4697 }
4698
4699 if (lwp->status_pending_p)
4700 {
4701 if (debug_threads)
4702 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4703 " status.\n",
4704 lwpid_of (thread));
4705 return false;
4706 }
4707
4708 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4709 or we have. */
4710 pc = get_pc (lwp);
4711
4712 /* If the PC has changed since we stopped, then don't do anything,
4713 and let the breakpoint/tracepoint be hit. This happens if, for
4714 instance, GDB handled the decr_pc_after_break subtraction itself,
4715 GDB is OOL stepping this thread, or the user has issued a "jump"
4716 command, or poked thread's registers herself. */
4717 if (pc != lwp->stop_pc)
4718 {
4719 if (debug_threads)
4720 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4721 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4722 lwpid_of (thread),
4723 paddress (lwp->stop_pc), paddress (pc));
4724 return false;
4725 }
4726
4727 /* On software single step target, resume the inferior with signal
4728 rather than stepping over. */
4729 if (can_software_single_step ()
4730 && lwp->pending_signals != NULL
4731 && lwp_signal_can_be_delivered (lwp))
4732 {
4733 if (debug_threads)
4734 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4735 " signals.\n",
4736 lwpid_of (thread));
4737
4738 return false;
4739 }
4740
4741 saved_thread = current_thread;
4742 current_thread = thread;
4743
4744 /* We can only step over breakpoints we know about. */
4745 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4746 {
4747	      /* Don't step over a breakpoint that GDB expects to hit
4748		 though.  If the condition is being evaluated on the target's
4749		 side and it evaluates to false, step over this breakpoint as well. */
4750 if (gdb_breakpoint_here (pc)
4751 && gdb_condition_true_at_breakpoint (pc)
4752 && gdb_no_commands_at_breakpoint (pc))
4753 {
4754 if (debug_threads)
4755 debug_printf ("Need step over [LWP %ld]? yes, but found"
4756 " GDB breakpoint at 0x%s; skipping step over\n",
4757 lwpid_of (thread), paddress (pc));
4758
4759 current_thread = saved_thread;
4760 return false;
4761 }
4762 else
4763 {
4764 if (debug_threads)
4765 debug_printf ("Need step over [LWP %ld]? yes, "
4766 "found breakpoint at 0x%s\n",
4767 lwpid_of (thread), paddress (pc));
4768
4769	      /* We've found an lwp that needs stepping over --- return true so
4770		 that find_thread stops looking. */
4771 current_thread = saved_thread;
4772
4773 return true;
4774 }
4775 }
4776
4777 current_thread = saved_thread;
4778
4779 if (debug_threads)
4780 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4781 " at 0x%s\n",
4782 lwpid_of (thread), paddress (pc));
4783
4784 return false;
4785 }
4786
4787 /* Start a step-over operation on LWP.  When LWP is stopped at a
4788    breakpoint, to make progress we need to move the breakpoint out
4789 of the way. If we let other threads run while we do that, they may
4790 pass by the breakpoint location and miss hitting it. To avoid
4791 that, a step-over momentarily stops all threads while LWP is
4792 single-stepped by either hardware or software while the breakpoint
4793 is temporarily uninserted from the inferior. When the single-step
4794 finishes, we reinsert the breakpoint, and let all threads that are
4795 supposed to be running, run again. */
4796
4797 static int
4798 start_step_over (struct lwp_info *lwp)
4799 {
4800 struct thread_info *thread = get_lwp_thread (lwp);
4801 struct thread_info *saved_thread;
4802 CORE_ADDR pc;
4803 int step;
4804
4805 if (debug_threads)
4806 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4807 lwpid_of (thread));
4808
4809 stop_all_lwps (1, lwp);
4810
4811 if (lwp->suspended != 0)
4812 {
4813 internal_error (__FILE__, __LINE__,
4814 "LWP %ld suspended=%d\n", lwpid_of (thread),
4815 lwp->suspended);
4816 }
4817
4818 if (debug_threads)
4819 debug_printf ("Done stopping all threads for step-over.\n");
4820
4821 /* Note, we should always reach here with an already adjusted PC,
4822 either by GDB (if we're resuming due to GDB's request), or by our
4823 caller, if we just finished handling an internal breakpoint GDB
4824 shouldn't care about. */
4825 pc = get_pc (lwp);
4826
4827 saved_thread = current_thread;
4828 current_thread = thread;
4829
4830 lwp->bp_reinsert = pc;
4831 uninsert_breakpoints_at (pc);
4832 uninsert_fast_tracepoint_jumps_at (pc);
4833
4834 step = single_step (lwp);
4835
4836 current_thread = saved_thread;
4837
4838 linux_resume_one_lwp (lwp, step, 0, NULL);
4839
4840 /* Require next event from this LWP. */
4841 step_over_bkpt = thread->id;
4842 return 1;
4843 }
4844
4845 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4846 start_step_over, if still there, and delete any single-step
4847 breakpoints we've set, on non hardware single-step targets. */
4848
4849 static int
4850 finish_step_over (struct lwp_info *lwp)
4851 {
4852 if (lwp->bp_reinsert != 0)
4853 {
4854 struct thread_info *saved_thread = current_thread;
4855
4856 if (debug_threads)
4857 debug_printf ("Finished step over.\n");
4858
4859 current_thread = get_lwp_thread (lwp);
4860
4861 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4862 may be no breakpoint to reinsert there by now. */
4863 reinsert_breakpoints_at (lwp->bp_reinsert);
4864 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4865
4866 lwp->bp_reinsert = 0;
4867
4868 /* Delete any single-step breakpoints. No longer needed. We
4869 don't have to worry about other threads hitting this trap,
4870 and later not being able to explain it, because we were
4871 stepping over a breakpoint, and we hold all threads but
4872 LWP stopped while doing that. */
4873 if (!can_hardware_single_step ())
4874 {
4875 gdb_assert (has_single_step_breakpoints (current_thread));
4876 delete_single_step_breakpoints (current_thread);
4877 }
4878
4879 step_over_bkpt = null_ptid;
4880 current_thread = saved_thread;
4881 return 1;
4882 }
4883 else
4884 return 0;
4885 }
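/* Taking start_step_over and finish_step_over together, the full
   step-over sequence implemented in this file is, in outline:

     stop_all_lwps (1, lwp);          // freeze and suspend siblings
     uninsert_breakpoints_at (pc);    // breakpoint out of the way
     single-step LWP, by hardware or by single-step breakpoints;
     ... wait for the stepped LWP to report back ...
     finish_step_over (lwp);          // reinsert, drop sw-step bkpts
     unsuspend_all_lwps (lwp);        // give the siblings back

   The last two steps run from the event-handling code once the
   stepped LWP stops again.  */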
4886
4887 /* If there's a step over in progress, wait until all threads stop
4888 (that is, until the stepping thread finishes its step), and
4889 unsuspend all lwps. The stepping thread ends with its status
4890 pending, which is processed later when we get back to processing
4891 events. */
4892
4893 static void
4894 complete_ongoing_step_over (void)
4895 {
4896 if (step_over_bkpt != null_ptid)
4897 {
4898 struct lwp_info *lwp;
4899 int wstat;
4900 int ret;
4901
4902 if (debug_threads)
4903 debug_printf ("detach: step over in progress, finish it first\n");
4904
4905 /* Passing NULL_PTID as filter indicates we want all events to
4906 be left pending. Eventually this returns when there are no
4907 unwaited-for children left. */
4908 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4909 &wstat, __WALL);
4910 gdb_assert (ret == -1);
4911
4912 lwp = find_lwp_pid (step_over_bkpt);
4913 if (lwp != NULL)
4914 finish_step_over (lwp);
4915 step_over_bkpt = null_ptid;
4916 unsuspend_all_lwps (lwp);
4917 }
4918 }
4919
4920 /* This function is called once per thread. We check the thread's resume
4921 request, which will tell us whether to resume, step, or leave the thread
4922 stopped; and what signal, if any, it should be sent.
4923
4924 For threads which we aren't explicitly told otherwise, we preserve
4925 the stepping flag; this is used for stepping over gdbserver-placed
4926 breakpoints.
4927
4928 If pending_flags was set in any thread, we queue any needed
4929 signals, since we won't actually resume. We already have a pending
4930 event to report, so we don't need to preserve any step requests;
4931 they should be re-issued if necessary. */
4932
4933 static void
4934 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4935 {
4936 struct lwp_info *lwp = get_thread_lwp (thread);
4937 int leave_pending;
4938
4939 if (lwp->resume == NULL)
4940 return;
4941
4942 if (lwp->resume->kind == resume_stop)
4943 {
4944 if (debug_threads)
4945 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4946
4947 if (!lwp->stopped)
4948 {
4949 if (debug_threads)
4950 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4951
4952 /* Stop the thread, and wait for the event asynchronously,
4953 through the event loop. */
4954 send_sigstop (lwp);
4955 }
4956 else
4957 {
4958 if (debug_threads)
4959 debug_printf ("already stopped LWP %ld\n",
4960 lwpid_of (thread));
4961
4962 /* The LWP may have been stopped in an internal event that
4963 was not meant to be notified back to GDB (e.g., gdbserver
4964 breakpoint), so we should be reporting a stop event in
4965 this case too. */
4966
4967 /* If the thread already has a pending SIGSTOP, this is a
4968 no-op. Otherwise, something later will presumably resume
4969 the thread and this will cause it to cancel any pending
4970 operation, due to last_resume_kind == resume_stop. If
4971 the thread already has a pending status to report, we
4972 will still report it the next time we wait - see
4973 status_pending_p_callback. */
4974
4975 /* If we already have a pending signal to report, then
4976 there's no need to queue a SIGSTOP, as this means we're
4977 midway through moving the LWP out of the jumppad, and we
4978 will report the pending signal as soon as that is
4979 finished. */
4980 if (lwp->pending_signals_to_report == NULL)
4981 send_sigstop (lwp);
4982 }
4983
4984 /* For stop requests, we're done. */
4985 lwp->resume = NULL;
4986 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4987 return;
4988 }
4989
4990 /* If this thread which is about to be resumed has a pending status,
4991 then don't resume it - we can just report the pending status.
4992 Likewise if it is suspended, because e.g., another thread is
4993 stepping past a breakpoint. Make sure to queue any signals that
4994 would otherwise be sent. In all-stop mode, we do this decision
4995 based on if *any* thread has a pending status. If there's a
4996 thread that needs the step-over-breakpoint dance, then don't
4997 resume any other thread but that particular one. */
4998 leave_pending = (lwp->suspended
4999 || lwp->status_pending_p
5000 || leave_all_stopped);
5001
5002 /* If we have a new signal, enqueue the signal. */
5003 if (lwp->resume->sig != 0)
5004 {
5005 siginfo_t info, *info_p;
5006
5007 /* If this is the same signal we were previously stopped by,
5008 make sure to queue its siginfo. */
5009 if (WIFSTOPPED (lwp->last_status)
5010 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5011 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5012 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5013 info_p = &info;
5014 else
5015 info_p = NULL;
5016
5017 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5018 }
5019
5020 if (!leave_pending)
5021 {
5022 if (debug_threads)
5023 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5024
5025 proceed_one_lwp (thread, NULL);
5026 }
5027 else
5028 {
5029 if (debug_threads)
5030 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5031 }
5032
5033 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5034 lwp->resume = NULL;
5035 }
5036
5037 static void
5038 linux_resume (struct thread_resume *resume_info, size_t n)
5039 {
5040 struct thread_info *need_step_over = NULL;
5041
5042 if (debug_threads)
5043 {
5044 debug_enter ();
5045 debug_printf ("linux_resume:\n");
5046 }
5047
5048 for_each_thread ([&] (thread_info *thread)
5049 {
5050 linux_set_resume_request (thread, resume_info, n);
5051 });
5052
5053 /* If there is a thread which would otherwise be resumed, which has
5054 a pending status, then don't resume any threads - we can just
5055 report the pending status. Make sure to queue any signals that
5056 would otherwise be sent. In non-stop mode, we'll apply this
5057 logic to each thread individually. We consume all pending events
5058 before considering to start a step-over (in all-stop). */
5059 bool any_pending = false;
5060 if (!non_stop)
5061 any_pending = find_thread (resume_status_pending_p) != NULL;
5062
5063 /* If there is a thread which would otherwise be resumed, which is
5064 stopped at a breakpoint that needs stepping over, then don't
5065 resume any threads - have it step over the breakpoint with all
5066 other threads stopped, then resume all threads again. Make sure
5067 to queue any signals that would otherwise be delivered or
5068 queued. */
5069 if (!any_pending && supports_breakpoints ())
5070 need_step_over = find_thread (need_step_over_p);
5071
5072 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5073
5074 if (debug_threads)
5075 {
5076 if (need_step_over != NULL)
5077 debug_printf ("Not resuming all, need step over\n");
5078 else if (any_pending)
5079 debug_printf ("Not resuming, all-stop and found "
5080 "an LWP with pending status\n");
5081 else
5082 debug_printf ("Resuming, no pending status or step over needed\n");
5083 }
5084
5085 /* Even if we're leaving threads stopped, queue all signals we'd
5086 otherwise deliver. */
5087 for_each_thread ([&] (thread_info *thread)
5088 {
5089 linux_resume_one_thread (thread, leave_all_stopped);
5090 });
5091
5092 if (need_step_over)
5093 start_step_over (get_thread_lwp (need_step_over));
5094
5095 if (debug_threads)
5096 {
5097 debug_printf ("linux_resume done\n");
5098 debug_exit ();
5099 }
5100
5101 /* We may have events that were pending that can/should be sent to
5102 the client now. Trigger a linux_wait call. */
5103 if (target_is_async_p ())
5104 async_file_mark ();
5105 }
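/* In outline, linux_resume picks one of three outcomes:

     1. Some to-be-resumed thread already has a pending status (checked
        globally only in all-stop): leave everything stopped and let
        the pending event be reported.
     2. Some thread sits on an internal breakpoint: start a step-over
        on that one thread, leaving the others stopped.
     3. Otherwise, resume (or leave stopped) each thread according to
        its own resume request.

   In all cases, signals the client asked to deliver are enqueued
   first, so nothing is lost when a thread stays stopped.  */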
5106
5107 /* This function is called once per thread. We check the thread's
5108 last resume request, which will tell us whether to resume, step, or
5109 leave the thread stopped. Any signal the client requested to be
5110 delivered has already been enqueued at this point.
5111
5112 If any thread that GDB wants running is stopped at an internal
5113 breakpoint that needs stepping over, we start a step-over operation
5114 on that particular thread, and leave all others stopped. */
5115
5116 static void
5117 proceed_one_lwp (thread_info *thread, lwp_info *except)
5118 {
5119 struct lwp_info *lwp = get_thread_lwp (thread);
5120 int step;
5121
5122 if (lwp == except)
5123 return;
5124
5125 if (debug_threads)
5126 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5127
5128 if (!lwp->stopped)
5129 {
5130 if (debug_threads)
5131 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5132 return;
5133 }
5134
5135 if (thread->last_resume_kind == resume_stop
5136 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5137 {
5138 if (debug_threads)
5139	 debug_printf ("   client wants LWP %ld to remain stopped\n",
5140 lwpid_of (thread));
5141 return;
5142 }
5143
5144 if (lwp->status_pending_p)
5145 {
5146 if (debug_threads)
5147 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5148 lwpid_of (thread));
5149 return;
5150 }
5151
5152 gdb_assert (lwp->suspended >= 0);
5153
5154 if (lwp->suspended)
5155 {
5156 if (debug_threads)
5157 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5158 return;
5159 }
5160
5161 if (thread->last_resume_kind == resume_stop
5162 && lwp->pending_signals_to_report == NULL
5163 && (lwp->collecting_fast_tracepoint
5164 == fast_tpoint_collect_result::not_collecting))
5165 {
5166 /* We haven't reported this LWP as stopped yet (otherwise, the
5167 last_status.kind check above would catch it, and we wouldn't
5168	 reach here).  This LWP may have been momentarily paused by a
5169	 stop_all_lwps call while handling, for example, another LWP's
5170 step-over. In that case, the pending expected SIGSTOP signal
5171 that was queued at vCont;t handling time will have already
5172 been consumed by wait_for_sigstop, and so we need to requeue
5173 another one here. Note that if the LWP already has a SIGSTOP
5174 pending, this is a no-op. */
5175
5176 if (debug_threads)
5177 debug_printf ("Client wants LWP %ld to stop. "
5178 "Making sure it has a SIGSTOP pending\n",
5179 lwpid_of (thread));
5180
5181 send_sigstop (lwp);
5182 }
5183
5184 if (thread->last_resume_kind == resume_step)
5185 {
5186 if (debug_threads)
5187 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5188 lwpid_of (thread));
5189
5190 /* If resume_step is requested by GDB, install single-step
5191 breakpoints when the thread is about to be actually resumed if
5192 the single-step breakpoints weren't removed. */
5193 if (can_software_single_step ()
5194 && !has_single_step_breakpoints (thread))
5195 install_software_single_step_breakpoints (lwp);
5196
5197 step = maybe_hw_step (thread);
5198 }
5199 else if (lwp->bp_reinsert != 0)
5200 {
5201 if (debug_threads)
5202 debug_printf (" stepping LWP %ld, reinsert set\n",
5203 lwpid_of (thread));
5204
5205 step = maybe_hw_step (thread);
5206 }
5207 else
5208 step = 0;
5209
5210 linux_resume_one_lwp (lwp, step, 0, NULL);
5211 }
5212
5213 static void
5214 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5215 {
5216 struct lwp_info *lwp = get_thread_lwp (thread);
5217
5218 if (lwp == except)
5219 return;
5220
5221 lwp_suspended_decr (lwp);
5222
5223 proceed_one_lwp (thread, except);
5224 }
5225
5226 /* When we finish a step-over, set threads running again. If there's
5227 another thread that may need a step-over, now's the time to start
5228 it. Eventually, we'll move all threads past their breakpoints. */
5229
5230 static void
5231 proceed_all_lwps (void)
5232 {
5233 struct thread_info *need_step_over;
5234
5235 /* If there is a thread which would otherwise be resumed, which is
5236 stopped at a breakpoint that needs stepping over, then don't
5237 resume any threads - have it step over the breakpoint with all
5238 other threads stopped, then resume all threads again. */
5239
5240 if (supports_breakpoints ())
5241 {
5242 need_step_over = find_thread (need_step_over_p);
5243
5244 if (need_step_over != NULL)
5245 {
5246 if (debug_threads)
5247 debug_printf ("proceed_all_lwps: found "
5248 "thread %ld needing a step-over\n",
5249 lwpid_of (need_step_over));
5250
5251 start_step_over (get_thread_lwp (need_step_over));
5252 return;
5253 }
5254 }
5255
5256 if (debug_threads)
5257 debug_printf ("Proceeding, no step-over needed\n");
5258
5259 for_each_thread ([] (thread_info *thread)
5260 {
5261 proceed_one_lwp (thread, NULL);
5262 });
5263 }
5264
5265 /* Stopped LWPs that the client wanted to be running, that don't have
5266 pending statuses, are set to run again, except for EXCEPT, if not
5267 NULL. This undoes a stop_all_lwps call. */
5268
5269 static void
5270 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5271 {
5272 if (debug_threads)
5273 {
5274 debug_enter ();
5275 if (except)
5276 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5277 lwpid_of (get_lwp_thread (except)));
5278 else
5279 debug_printf ("unstopping all lwps\n");
5280 }
5281
5282 if (unsuspend)
5283 for_each_thread ([&] (thread_info *thread)
5284 {
5285 unsuspend_and_proceed_one_lwp (thread, except);
5286 });
5287 else
5288 for_each_thread ([&] (thread_info *thread)
5289 {
5290 proceed_one_lwp (thread, except);
5291 });
5292
5293 if (debug_threads)
5294 {
5295 debug_printf ("unstop_all_lwps done\n");
5296 debug_exit ();
5297 }
5298 }
5299
5300
5301 #ifdef HAVE_LINUX_REGSETS
5302
5303 #define use_linux_regsets 1
5304
5305 /* Returns true if REGSET has been disabled. */
5306
5307 static int
5308 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5309 {
5310 return (info->disabled_regsets != NULL
5311 && info->disabled_regsets[regset - info->regsets]);
5312 }
5313
5314 /* Disable REGSET. */
5315
5316 static void
5317 disable_regset (struct regsets_info *info, struct regset_info *regset)
5318 {
5319 int dr_offset;
5320
5321 dr_offset = regset - info->regsets;
5322 if (info->disabled_regsets == NULL)
5323 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5324 info->disabled_regsets[dr_offset] = 1;
5325 }
5326
5327 static int
5328 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5329 struct regcache *regcache)
5330 {
5331 struct regset_info *regset;
5332 int saw_general_regs = 0;
5333 int pid;
5334 struct iovec iov;
5335
5336 pid = lwpid_of (current_thread);
5337 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5338 {
5339 void *buf, *data;
5340 int nt_type, res;
5341
5342 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5343 continue;
5344
5345 buf = xmalloc (regset->size);
5346
5347 nt_type = regset->nt_type;
5348 if (nt_type)
5349 {
5350 iov.iov_base = buf;
5351 iov.iov_len = regset->size;
5352 data = (void *) &iov;
5353 }
5354 else
5355 data = buf;
5356
5357 #ifndef __sparc__
5358 res = ptrace (regset->get_request, pid,
5359 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5360 #else
5361 res = ptrace (regset->get_request, pid, data, nt_type);
5362 #endif
5363 if (res < 0)
5364 {
5365 if (errno == EIO
5366 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5367 {
5368 /* If we get EIO on a regset, or an EINVAL and the regset is
5369 optional, do not try it again for this process mode. */
5370 disable_regset (regsets_info, regset);
5371 }
5372 else if (errno == ENODATA)
5373 {
5374 /* ENODATA may be returned if the regset is currently
5375 not "active". This can happen in normal operation,
5376 so suppress the warning in this case. */
5377 }
5378 else if (errno == ESRCH)
5379 {
5380 /* At this point, ESRCH should mean the process is
5381 already gone, in which case we simply ignore attempts
5382 to read its registers. */
5383 }
5384 else
5385 {
5386 char s[256];
5387 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5388 pid);
5389 perror (s);
5390 }
5391 }
5392 else
5393 {
5394 if (regset->type == GENERAL_REGS)
5395 saw_general_regs = 1;
5396 regset->store_function (regcache, buf);
5397 }
5398 free (buf);
5399 }
5400 if (saw_general_regs)
5401 return 0;
5402 else
5403 return 1;
5404 }
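/* When a regset has a non-zero NT_TYPE, the request above is the
   modern PTRACE_GETREGSET interface, which takes an iovec.  A
   free-standing sketch for the general-purpose registers (assuming
   x86-64 Linux; illustration only, not code from this file):  */
#if 0
#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>		/* struct user_regs_struct */

static int
fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov = { regs, sizeof (*regs) };

  /* On success the kernel shrinks iov.iov_len to the amount it
     actually wrote.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif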
5405
5406 static int
5407 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5408 struct regcache *regcache)
5409 {
5410 struct regset_info *regset;
5411 int saw_general_regs = 0;
5412 int pid;
5413 struct iovec iov;
5414
5415 pid = lwpid_of (current_thread);
5416 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5417 {
5418 void *buf, *data;
5419 int nt_type, res;
5420
5421 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5422 || regset->fill_function == NULL)
5423 continue;
5424
5425 buf = xmalloc (regset->size);
5426
5427 /* First fill the buffer with the current register set contents,
5428 in case there are any items in the kernel's regset that are
5429 not in gdbserver's regcache. */
5430
5431 nt_type = regset->nt_type;
5432 if (nt_type)
5433 {
5434 iov.iov_base = buf;
5435 iov.iov_len = regset->size;
5436 data = (void *) &iov;
5437 }
5438 else
5439 data = buf;
5440
5441 #ifndef __sparc__
5442 res = ptrace (regset->get_request, pid,
5443 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5444 #else
5445 res = ptrace (regset->get_request, pid, data, nt_type);
5446 #endif
5447
5448 if (res == 0)
5449 {
5450 /* Then overlay our cached registers on that. */
5451 regset->fill_function (regcache, buf);
5452
5453 /* Only now do we write the register set. */
5454 #ifndef __sparc__
5455 res = ptrace (regset->set_request, pid,
5456 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5457 #else
5458 res = ptrace (regset->set_request, pid, data, nt_type);
5459 #endif
5460 }
5461
5462 if (res < 0)
5463 {
5464 if (errno == EIO
5465 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5466 {
5467 /* If we get EIO on a regset, or an EINVAL and the regset is
5468 optional, do not try it again for this process mode. */
5469 disable_regset (regsets_info, regset);
5470 }
5471 else if (errno == ESRCH)
5472 {
5473 /* At this point, ESRCH should mean the process is
5474 already gone, in which case we simply ignore attempts
5475 to change its registers. See also the related
5476 comment in linux_resume_one_lwp. */
5477 free (buf);
5478 return 0;
5479 }
5480 else
5481 {
5482 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5483 }
5484 }
5485 else if (regset->type == GENERAL_REGS)
5486 saw_general_regs = 1;
5487 free (buf);
5488 }
5489 if (saw_general_regs)
5490 return 0;
5491 else
5492 return 1;
5493 }
5494
5495 #else /* !HAVE_LINUX_REGSETS */
5496
5497 #define use_linux_regsets 0
5498 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5499 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5500
5501 #endif
5502
5503 /* Return 1 if register REGNO is supported by one of the regset ptrace
5504 calls or 0 if it has to be transferred individually. */
5505
5506 static int
5507 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5508 {
5509 unsigned char mask = 1 << (regno % 8);
5510 size_t index = regno / 8;
5511
5512 return (use_linux_regsets
5513 && (regs_info->regset_bitmap == NULL
5514 || (regs_info->regset_bitmap[index] & mask) != 0));
5515 }
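/* For example, with REGNO == 10: index = 10 / 8 = 1 and
   mask = 1 << (10 % 8) = 0x04, so bit 2 of regset_bitmap[1] tells
   whether register 10 travels via a regset.  A NULL bitmap means
   every register does.  */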
5516
5517 #ifdef HAVE_LINUX_USRREGS
5518
5519 static int
5520 register_addr (const struct usrregs_info *usrregs, int regnum)
5521 {
5522 int addr;
5523
5524 if (regnum < 0 || regnum >= usrregs->num_regs)
5525 error ("Invalid register number %d.", regnum);
5526
5527 addr = usrregs->regmap[regnum];
5528
5529 return addr;
5530 }
5531
5532 /* Fetch one register. */
5533 static void
5534 fetch_register (const struct usrregs_info *usrregs,
5535 struct regcache *regcache, int regno)
5536 {
5537 CORE_ADDR regaddr;
5538 int i, size;
5539 char *buf;
5540 int pid;
5541
5542 if (regno >= usrregs->num_regs)
5543 return;
5544 if ((*the_low_target.cannot_fetch_register) (regno))
5545 return;
5546
5547 regaddr = register_addr (usrregs, regno);
5548 if (regaddr == -1)
5549 return;
5550
5551 size = ((register_size (regcache->tdesc, regno)
5552 + sizeof (PTRACE_XFER_TYPE) - 1)
5553 & -sizeof (PTRACE_XFER_TYPE));
5554 buf = (char *) alloca (size);
5555
5556 pid = lwpid_of (current_thread);
5557 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5558 {
5559 errno = 0;
5560 *(PTRACE_XFER_TYPE *) (buf + i) =
5561 ptrace (PTRACE_PEEKUSER, pid,
5562 /* Coerce to a uintptr_t first to avoid potential gcc warning
5563 of coercing an 8 byte integer to a 4 byte pointer. */
5564 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5565 regaddr += sizeof (PTRACE_XFER_TYPE);
5566 if (errno != 0)
5567 {
5568 /* Mark register REGNO unavailable. */
5569 supply_register (regcache, regno, NULL);
5570 return;
5571 }
5572 }
5573
5574 if (the_low_target.supply_ptrace_register)
5575 the_low_target.supply_ptrace_register (regcache, regno, buf);
5576 else
5577 supply_register (regcache, regno, buf);
5578 }
5579
5580 /* Store one register. */
5581 static void
5582 store_register (const struct usrregs_info *usrregs,
5583 struct regcache *regcache, int regno)
5584 {
5585 CORE_ADDR regaddr;
5586 int i, size;
5587 char *buf;
5588 int pid;
5589
5590 if (regno >= usrregs->num_regs)
5591 return;
5592 if ((*the_low_target.cannot_store_register) (regno))
5593 return;
5594
5595 regaddr = register_addr (usrregs, regno);
5596 if (regaddr == -1)
5597 return;
5598
5599 size = ((register_size (regcache->tdesc, regno)
5600 + sizeof (PTRACE_XFER_TYPE) - 1)
5601 & -sizeof (PTRACE_XFER_TYPE));
5602 buf = (char *) alloca (size);
5603 memset (buf, 0, size);
5604
5605 if (the_low_target.collect_ptrace_register)
5606 the_low_target.collect_ptrace_register (regcache, regno, buf);
5607 else
5608 collect_register (regcache, regno, buf);
5609
5610 pid = lwpid_of (current_thread);
5611 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5612 {
5613 errno = 0;
5614 ptrace (PTRACE_POKEUSER, pid,
5615 /* Coerce to a uintptr_t first to avoid potential gcc warning
5616 about coercing an 8 byte integer to a 4 byte pointer. */
5617 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5618 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5619 if (errno != 0)
5620 {
5621 /* At this point, ESRCH should mean the process is
5622 already gone, in which case we simply ignore attempts
5623 to change its registers. See also the related
5624 comment in linux_resume_one_lwp. */
5625 if (errno == ESRCH)
5626 return;
5627
5628 if ((*the_low_target.cannot_store_register) (regno) == 0)
5629 error ("writing register %d: %s", regno, strerror (errno));
5630 }
5631 regaddr += sizeof (PTRACE_XFER_TYPE);
5632 }
5633 }
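
/* A worked example of the size rounding used by fetch_register and
   store_register above (sizes are illustrative): with a 4-byte
   PTRACE_XFER_TYPE, a 10-byte register rounds up to a 12-byte
   buffer, i.e. three PTRACE_PEEKUSER or PTRACE_POKEUSER transfers.  */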
5634
5635 /* Fetch all registers, or just one, from the child process.
5636 If REGNO is -1, do this for all registers, skipping any that are
5637 assumed to have been retrieved by regsets_fetch_inferior_registers,
5638 unless ALL is non-zero.
5639 Otherwise, REGNO specifies which register (so we can save time). */
5640 static void
5641 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5642 struct regcache *regcache, int regno, int all)
5643 {
5644 struct usrregs_info *usr = regs_info->usrregs;
5645
5646 if (regno == -1)
5647 {
5648 for (regno = 0; regno < usr->num_regs; regno++)
5649 if (all || !linux_register_in_regsets (regs_info, regno))
5650 fetch_register (usr, regcache, regno);
5651 }
5652 else
5653 fetch_register (usr, regcache, regno);
5654 }
5655
5656 /* Store our register values back into the inferior.
5657 If REGNO is -1, do this for all registers, skipping any that are
5658 assumed to have been saved by regsets_store_inferior_registers,
5659 unless ALL is non-zero.
5660 Otherwise, REGNO specifies which register (so we can save time). */
5661 static void
5662 usr_store_inferior_registers (const struct regs_info *regs_info,
5663 struct regcache *regcache, int regno, int all)
5664 {
5665 struct usrregs_info *usr = regs_info->usrregs;
5666
5667 if (regno == -1)
5668 {
5669 for (regno = 0; regno < usr->num_regs; regno++)
5670 if (all || !linux_register_in_regsets (regs_info, regno))
5671 store_register (usr, regcache, regno);
5672 }
5673 else
5674 store_register (usr, regcache, regno);
5675 }
5676
5677 #else /* !HAVE_LINUX_USRREGS */
5678
5679 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5680 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5681
5682 #endif
5683
5684
5685 static void
5686 linux_fetch_registers (struct regcache *regcache, int regno)
5687 {
5688 int use_regsets;
5689 int all = 0;
5690 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5691
5692 if (regno == -1)
5693 {
5694 if (the_low_target.fetch_register != NULL
5695 && regs_info->usrregs != NULL)
5696 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5697 (*the_low_target.fetch_register) (regcache, regno);
5698
5699 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5700 if (regs_info->usrregs != NULL)
5701 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5702 }
5703 else
5704 {
5705 if (the_low_target.fetch_register != NULL
5706 && (*the_low_target.fetch_register) (regcache, regno))
5707 return;
5708
5709 use_regsets = linux_register_in_regsets (regs_info, regno);
5710 if (use_regsets)
5711 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5712 regcache);
5713 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5714 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5715 }
5716 }
5717
5718 static void
5719 linux_store_registers (struct regcache *regcache, int regno)
5720 {
5721 int use_regsets;
5722 int all = 0;
5723 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5724
5725 if (regno == -1)
5726 {
5727 all = regsets_store_inferior_registers (regs_info->regsets_info,
5728 regcache);
5729 if (regs_info->usrregs != NULL)
5730 usr_store_inferior_registers (regs_info, regcache, regno, all);
5731 }
5732 else
5733 {
5734 use_regsets = linux_register_in_regsets (regs_info, regno);
5735 if (use_regsets)
5736 all = regsets_store_inferior_registers (regs_info->regsets_info,
5737 regcache);
5738 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5739 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5740 }
5741 }
5742
5743
5744 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5745 to debugger memory starting at MYADDR. */
5746
5747 static int
5748 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5749 {
5750 int pid = lwpid_of (current_thread);
5751 PTRACE_XFER_TYPE *buffer;
5752 CORE_ADDR addr;
5753 int count;
5754 char filename[64];
5755 int i;
5756 int ret;
5757 int fd;
5758
5759 /* Try using /proc. Don't bother for short reads (under three words). */
5760 if (len >= 3 * sizeof (long))
5761 {
5762 int bytes;
5763
5764 /* We could keep this file open and cache it - possibly one per
5765 thread. That requires some juggling, but is even faster. */
5766 sprintf (filename, "/proc/%d/mem", pid);
5767 fd = open (filename, O_RDONLY | O_LARGEFILE);
5768 if (fd == -1)
5769 goto no_proc;
5770
5771 /* If pread64 is available, use it. It's faster if the kernel
5772 supports it (only one syscall), and it's 64-bit safe even on
5773 32-bit platforms (for instance, SPARC debugging a SPARC64
5774 application). */
5775 #ifdef HAVE_PREAD64
5776 bytes = pread64 (fd, myaddr, len, memaddr);
5777 #else
5778 bytes = -1;
5779 if (lseek (fd, memaddr, SEEK_SET) != -1)
5780 bytes = read (fd, myaddr, len);
5781 #endif
5782
5783 close (fd);
5784 if (bytes == len)
5785 return 0;
5786
5787 /* Some data was read; we'll try to get the rest with ptrace. */
5788 if (bytes > 0)
5789 {
5790 memaddr += bytes;
5791 myaddr += bytes;
5792 len -= bytes;
5793 }
5794 }
5795
5796 no_proc:
5797 /* Round starting address down to longword boundary. */
5798 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5799 /* Round ending address up; get number of longwords that makes. */
5800 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5801 / sizeof (PTRACE_XFER_TYPE));
5802 /* Allocate buffer of that many longwords. */
5803 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5804
5805 /* Read all the longwords. */
5806 errno = 0;
5807 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5808 {
5809 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5810 about coercing an 8 byte integer to a 4 byte pointer. */
5811 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5812 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5813 (PTRACE_TYPE_ARG4) 0);
5814 if (errno)
5815 break;
5816 }
5817 ret = errno;
5818
5819 /* Copy appropriate bytes out of the buffer. */
5820 if (i > 0)
5821 {
5822 i *= sizeof (PTRACE_XFER_TYPE);
5823 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5824 memcpy (myaddr,
5825 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5826 i < len ? i : len);
5827 }
5828
5829 return ret;
5830 }
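
/* A minimal sketch of the fd-caching idea mentioned above, assuming
   a single inferior: keep /proc/PID/mem open across calls and reopen
   it only when the PID changes.  This is illustrative only - the
   code above reopens the file on every call, and a cached descriptor
   would also need invalidating on exec and exit.  */

static int
example_cached_proc_mem_fd (int pid)
{
  static int cached_pid = -1;
  static int cached_fd = -1;

  if (pid != cached_pid)
    {
      char filename[64];

      if (cached_fd != -1)
        close (cached_fd);
      xsnprintf (filename, sizeof filename, "/proc/%d/mem", pid);
      cached_fd = open (filename, O_RDONLY | O_LARGEFILE);
      cached_pid = pid;
    }
  return cached_fd;
}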
5831
5832 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5833 memory at MEMADDR. On failure (cannot write to the inferior)
5834 returns the value of errno. Always succeeds if LEN is zero. */
5835
5836 static int
5837 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5838 {
5839 int i;
5840 /* Round starting address down to longword boundary. */
5841 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5842 /* Round ending address up; get number of longwords that makes. */
5843 int count
5844 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5845 / sizeof (PTRACE_XFER_TYPE);
5846
5847 /* Allocate buffer of that many longwords. */
5848 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5849
5850 int pid = lwpid_of (current_thread);
5851
5852 if (len == 0)
5853 {
5854 /* Zero length write always succeeds. */
5855 return 0;
5856 }
5857
5858 if (debug_threads)
5859 {
5860 /* Dump up to four bytes. */
5861 char str[4 * 2 + 1];
5862 char *p = str;
5863 int dump = len < 4 ? len : 4;
5864
5865 for (i = 0; i < dump; i++)
5866 {
5867 sprintf (p, "%02x", myaddr[i]);
5868 p += 2;
5869 }
5870 *p = '\0';
5871
5872 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5873 str, (long) memaddr, pid);
5874 }
5875
5876 /* Fill start and end extra bytes of buffer with existing memory data. */
5877
5878 errno = 0;
5879 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5880 about coercing an 8 byte integer to a 4 byte pointer. */
5881 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5882 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5883 (PTRACE_TYPE_ARG4) 0);
5884 if (errno)
5885 return errno;
5886
5887 if (count > 1)
5888 {
5889 errno = 0;
5890 buffer[count - 1]
5891 = ptrace (PTRACE_PEEKTEXT, pid,
5892 /* Coerce to a uintptr_t first to avoid potential gcc warning
5893 about coercing an 8 byte integer to a 4 byte pointer. */
5894 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5895 * sizeof (PTRACE_XFER_TYPE)),
5896 (PTRACE_TYPE_ARG4) 0);
5897 if (errno)
5898 return errno;
5899 }
5900
5901 /* Copy data to be written over corresponding part of buffer. */
5902
5903 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5904 myaddr, len);
5905
5906 /* Write the entire buffer. */
5907
5908 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5909 {
5910 errno = 0;
5911 ptrace (PTRACE_POKETEXT, pid,
5912 /* Coerce to a uintptr_t first to avoid potential gcc warning
5913 about coercing an 8 byte integer to a 4 byte pointer. */
5914 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5915 (PTRACE_TYPE_ARG4) buffer[i]);
5916 if (errno)
5917 return errno;
5918 }
5919
5920 return 0;
5921 }
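
/* A minimal sketch of the word rounding shared by linux_read_memory
   and linux_write_memory, assuming sizeof (PTRACE_XFER_TYPE) is a
   power of two.  E.g. with 8-byte words, a 5-byte write at 0x1003
   yields *ADDR == 0x1000 and *COUNT == 2, which is why the first and
   last words are pre-read above before being written back.  */

static void
example_round_to_xfer_words (CORE_ADDR memaddr, int len,
                             CORE_ADDR *addr, int *count)
{
  *addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  *count = (((memaddr + len) - *addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);
}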
5922
5923 static void
5924 linux_look_up_symbols (void)
5925 {
5926 #ifdef USE_THREAD_DB
5927 struct process_info *proc = current_process ();
5928
5929 if (proc->priv->thread_db != NULL)
5930 return;
5931
5932 thread_db_init ();
5933 #endif
5934 }
5935
5936 static void
5937 linux_request_interrupt (void)
5938 {
5939 /* Send a SIGINT to the process group. This acts just like the user
5940 typed a ^C on the controlling terminal. */
5941 kill (-signal_pid, SIGINT);
5942 }
5943
5944 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5945 to debugger memory starting at MYADDR. */
5946
5947 static int
5948 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5949 {
5950 char filename[PATH_MAX];
5951 int fd, n;
5952 int pid = lwpid_of (current_thread);
5953
5954 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5955
5956 fd = open (filename, O_RDONLY);
5957 if (fd < 0)
5958 return -1;
5959
5960 if (offset != (CORE_ADDR) 0
5961 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5962 n = -1;
5963 else
5964 n = read (fd, myaddr, len);
5965
5966 close (fd);
5967
5968 return n;
5969 }
5970
5971 /* These breakpoint and watchpoint related wrapper functions simply
5972 pass on the function call if the target has registered a
5973 corresponding function. */
5974
5975 static int
5976 linux_supports_z_point_type (char z_type)
5977 {
5978 return (the_low_target.supports_z_point_type != NULL
5979 && the_low_target.supports_z_point_type (z_type));
5980 }
5981
5982 static int
5983 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5984 int size, struct raw_breakpoint *bp)
5985 {
5986 if (type == raw_bkpt_type_sw)
5987 return insert_memory_breakpoint (bp);
5988 else if (the_low_target.insert_point != NULL)
5989 return the_low_target.insert_point (type, addr, size, bp);
5990 else
5991 /* Unsupported (see target.h). */
5992 return 1;
5993 }
5994
5995 static int
5996 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5997 int size, struct raw_breakpoint *bp)
5998 {
5999 if (type == raw_bkpt_type_sw)
6000 return remove_memory_breakpoint (bp);
6001 else if (the_low_target.remove_point != NULL)
6002 return the_low_target.remove_point (type, addr, size, bp);
6003 else
6004 /* Unsupported (see target.h). */
6005 return 1;
6006 }
6007
6008 /* Implement the to_stopped_by_sw_breakpoint target_ops
6009 method. */
6010
6011 static int
6012 linux_stopped_by_sw_breakpoint (void)
6013 {
6014 struct lwp_info *lwp = get_thread_lwp (current_thread);
6015
6016 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6017 }
6018
6019 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6020 method. */
6021
6022 static int
6023 linux_supports_stopped_by_sw_breakpoint (void)
6024 {
6025 return USE_SIGTRAP_SIGINFO;
6026 }
6027
6028 /* Implement the to_stopped_by_hw_breakpoint target_ops
6029 method. */
6030
6031 static int
6032 linux_stopped_by_hw_breakpoint (void)
6033 {
6034 struct lwp_info *lwp = get_thread_lwp (current_thread);
6035
6036 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6037 }
6038
6039 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6040 method. */
6041
6042 static int
6043 linux_supports_stopped_by_hw_breakpoint (void)
6044 {
6045 return USE_SIGTRAP_SIGINFO;
6046 }
6047
6048 /* Implement the supports_hardware_single_step target_ops method. */
6049
6050 static int
6051 linux_supports_hardware_single_step (void)
6052 {
6053 return can_hardware_single_step ();
6054 }
6055
6056 static int
6057 linux_supports_software_single_step (void)
6058 {
6059 return can_software_single_step ();
6060 }
6061
6062 static int
6063 linux_stopped_by_watchpoint (void)
6064 {
6065 struct lwp_info *lwp = get_thread_lwp (current_thread);
6066
6067 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6068 }
6069
6070 static CORE_ADDR
6071 linux_stopped_data_address (void)
6072 {
6073 struct lwp_info *lwp = get_thread_lwp (current_thread);
6074
6075 return lwp->stopped_data_address;
6076 }
6077
6078 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6079 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6080 && defined(PT_TEXT_END_ADDR)
6081
6082 /* This is only used for targets that define PT_TEXT_ADDR,
6083 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6084 the target has different ways of acquiring this information, like
6085 loadmaps. */
6086
6087 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6088 to tell gdb about. */
6089
6090 static int
6091 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6092 {
6093 unsigned long text, text_end, data;
6094 int pid = lwpid_of (current_thread);
6095
6096 errno = 0;
6097
6098 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6099 (PTRACE_TYPE_ARG4) 0);
6100 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6101 (PTRACE_TYPE_ARG4) 0);
6102 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6103 (PTRACE_TYPE_ARG4) 0);
6104
6105 if (errno == 0)
6106 {
6107 /* Both text and data offsets produced at compile-time (and so
6108 used by gdb) are relative to the beginning of the program,
6109 with the data segment immediately following the text segment.
6110 However, the actual runtime layout in memory may put the data
6111 somewhere else, so when we send gdb a data base-address, we
6112 use the real data base address and subtract the compile-time
6113 data base-address from it (which is just the length of the
6114 text segment). BSS immediately follows data in both
6115 cases. */
6116 *text_p = text;
6117 *data_p = data - (text_end - text);
6118
6119 return 1;
6120 }
6121 return 0;
6122 }
6123 #endif
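
/* A worked example of the offset computation above, with made-up
   addresses: if text is loaded at 0x10000000, text_end is 0x10008000
   and data is mapped at 0x20000000, gdb is told the data base is
   0x20000000 - 0x8000 = 0x1fff8000, because compile-time data
   addresses already include the 0x8000-byte text segment.  */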
6124
6125 static int
6126 linux_qxfer_osdata (const char *annex,
6127 unsigned char *readbuf, unsigned const char *writebuf,
6128 CORE_ADDR offset, int len)
6129 {
6130 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6131 }
6132
6133 /* Convert a native/host siginfo object into/from the siginfo in the
6134 layout of the inferior's architecture. */
6135
6136 static void
6137 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6138 {
6139 int done = 0;
6140
6141 if (the_low_target.siginfo_fixup != NULL)
6142 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6143
6144 /* If there was no callback, or the callback didn't do anything,
6145 then just do a straight memcpy. */
6146 if (!done)
6147 {
6148 if (direction == 1)
6149 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6150 else
6151 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6152 }
6153 }
6154
6155 static int
6156 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6157 unsigned const char *writebuf, CORE_ADDR offset, int len)
6158 {
6159 int pid;
6160 siginfo_t siginfo;
6161 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6162
6163 if (current_thread == NULL)
6164 return -1;
6165
6166 pid = lwpid_of (current_thread);
6167
6168 if (debug_threads)
6169 debug_printf ("%s siginfo for lwp %d.\n",
6170 readbuf != NULL ? "Reading" : "Writing",
6171 pid);
6172
6173 if (offset >= sizeof (siginfo))
6174 return -1;
6175
6176 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6177 return -1;
6178
6179 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6180 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6181 inferior with a 64-bit GDBSERVER should look the same as debugging it
6182 with a 32-bit GDBSERVER, we need to convert it. */
6183 siginfo_fixup (&siginfo, inf_siginfo, 0);
6184
6185 if (offset + len > sizeof (siginfo))
6186 len = sizeof (siginfo) - offset;
6187
6188 if (readbuf != NULL)
6189 memcpy (readbuf, inf_siginfo + offset, len);
6190 else
6191 {
6192 memcpy (inf_siginfo + offset, writebuf, len);
6193
6194 /* Convert back to ptrace layout before flushing it out. */
6195 siginfo_fixup (&siginfo, inf_siginfo, 1);
6196
6197 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6198 return -1;
6199 }
6200
6201 return len;
6202 }
6203
6204 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6205 it lets us notice when children change state, and it acts as the
6206 handler for the sigsuspend in my_waitpid. */
6207
6208 static void
6209 sigchld_handler (int signo)
6210 {
6211 int old_errno = errno;
6212
6213 if (debug_threads)
6214 {
6215 do
6216 {
6217 /* fprintf is not async-signal-safe, so call write
6218 directly. */
6219 if (write (2, "sigchld_handler\n",
6220 sizeof ("sigchld_handler\n") - 1) < 0)
6221 break; /* just ignore */
6222 } while (0);
6223 }
6224
6225 if (target_is_async_p ())
6226 async_file_mark (); /* trigger a linux_wait */
6227
6228 errno = old_errno;
6229 }
6230
6231 static int
6232 linux_supports_non_stop (void)
6233 {
6234 return 1;
6235 }
6236
6237 static int
6238 linux_async (int enable)
6239 {
6240 int previous = target_is_async_p ();
6241
6242 if (debug_threads)
6243 debug_printf ("linux_async (%d), previous=%d\n",
6244 enable, previous);
6245
6246 if (previous != enable)
6247 {
6248 sigset_t mask;
6249 sigemptyset (&mask);
6250 sigaddset (&mask, SIGCHLD);
6251
6252 sigprocmask (SIG_BLOCK, &mask, NULL);
6253
6254 if (enable)
6255 {
6256 if (pipe (linux_event_pipe) == -1)
6257 {
6258 linux_event_pipe[0] = -1;
6259 linux_event_pipe[1] = -1;
6260 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6261
6262 warning ("creating event pipe failed.");
6263 return previous;
6264 }
6265
6266 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6267 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6268
6269 /* Register the event loop handler. */
6270 add_file_handler (linux_event_pipe[0],
6271 handle_target_event, NULL);
6272
6273 /* Always trigger a linux_wait. */
6274 async_file_mark ();
6275 }
6276 else
6277 {
6278 delete_file_handler (linux_event_pipe[0]);
6279
6280 close (linux_event_pipe[0]);
6281 close (linux_event_pipe[1]);
6282 linux_event_pipe[0] = -1;
6283 linux_event_pipe[1] = -1;
6284 }
6285
6286 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6287 }
6288
6289 return previous;
6290 }
6291
6292 static int
6293 linux_start_non_stop (int nonstop)
6294 {
6295 /* Register or unregister from event-loop accordingly. */
6296 linux_async (nonstop);
6297
6298 if (target_is_async_p () != (nonstop != 0))
6299 return -1;
6300
6301 return 0;
6302 }
6303
6304 static int
6305 linux_supports_multi_process (void)
6306 {
6307 return 1;
6308 }
6309
6310 /* Check if fork events are supported. */
6311
6312 static int
6313 linux_supports_fork_events (void)
6314 {
6315 return linux_supports_tracefork ();
6316 }
6317
6318 /* Check if vfork events are supported. */
6319
6320 static int
6321 linux_supports_vfork_events (void)
6322 {
6323 return linux_supports_tracefork ();
6324 }
6325
6326 /* Check if exec events are supported. */
6327
6328 static int
6329 linux_supports_exec_events (void)
6330 {
6331 return linux_supports_traceexec ();
6332 }
6333
6334 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6335 ptrace flags for all inferiors. This is in case the new GDB connection
6336 doesn't support the same set of events that the previous one did. */
6337
6338 static void
6339 linux_handle_new_gdb_connection (void)
6340 {
6341 /* Request that all the lwps reset their ptrace options. */
6342 for_each_thread ([] (thread_info *thread)
6343 {
6344 struct lwp_info *lwp = get_thread_lwp (thread);
6345
6346 if (!lwp->stopped)
6347 {
6348 /* Stop the lwp so we can modify its ptrace options. */
6349 lwp->must_set_ptrace_flags = 1;
6350 linux_stop_lwp (lwp);
6351 }
6352 else
6353 {
6354 /* Already stopped; go ahead and set the ptrace options. */
6355 struct process_info *proc = find_process_pid (pid_of (thread));
6356 int options = linux_low_ptrace_options (proc->attached);
6357
6358 linux_enable_event_reporting (lwpid_of (thread), options);
6359 lwp->must_set_ptrace_flags = 0;
6360 }
6361 });
6362 }
6363
6364 static int
6365 linux_supports_disable_randomization (void)
6366 {
6367 #ifdef HAVE_PERSONALITY
6368 return 1;
6369 #else
6370 return 0;
6371 #endif
6372 }
6373
6374 static int
6375 linux_supports_agent (void)
6376 {
6377 return 1;
6378 }
6379
6380 static int
6381 linux_supports_range_stepping (void)
6382 {
6383 if (can_software_single_step ())
6384 return 1;
6385 if (*the_low_target.supports_range_stepping == NULL)
6386 return 0;
6387
6388 return (*the_low_target.supports_range_stepping) ();
6389 }
6390
6391 /* Enumerate spufs IDs for process PID. */
6392 static int
6393 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6394 {
6395 int pos = 0;
6396 int written = 0;
6397 char path[128];
6398 DIR *dir;
6399 struct dirent *entry;
6400
6401 sprintf (path, "/proc/%ld/fd", pid);
6402 dir = opendir (path);
6403 if (!dir)
6404 return -1;
6405
6406 rewinddir (dir);
6407 while ((entry = readdir (dir)) != NULL)
6408 {
6409 struct stat st;
6410 struct statfs stfs;
6411 int fd;
6412
6413 fd = atoi (entry->d_name);
6414 if (!fd)
6415 continue;
6416
6417 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6418 if (stat (path, &st) != 0)
6419 continue;
6420 if (!S_ISDIR (st.st_mode))
6421 continue;
6422
6423 if (statfs (path, &stfs) != 0)
6424 continue;
6425 if (stfs.f_type != SPUFS_MAGIC)
6426 continue;
6427
6428 if (pos >= offset && pos + 4 <= offset + len)
6429 {
6430 *(unsigned int *)(buf + pos - offset) = fd;
6431 written += 4;
6432 }
6433 pos += 4;
6434 }
6435
6436 closedir (dir);
6437 return written;
6438 }
6439
6440 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6441 object type, using the /proc file system. */
6442 static int
6443 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6444 unsigned const char *writebuf,
6445 CORE_ADDR offset, int len)
6446 {
6447 long pid = lwpid_of (current_thread);
6448 char buf[128];
6449 int fd = 0;
6450 int ret = 0;
6451
6452 if (!writebuf && !readbuf)
6453 return -1;
6454
6455 if (!*annex)
6456 {
6457 if (!readbuf)
6458 return -1;
6459 else
6460 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6461 }
6462
6463 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6464 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6465 if (fd <= 0)
6466 return -1;
6467
6468 if (offset != 0
6469 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6470 {
6471 close (fd);
6472 return 0;
6473 }
6474
6475 if (writebuf)
6476 ret = write (fd, writebuf, (size_t) len);
6477 else
6478 ret = read (fd, readbuf, (size_t) len);
6479
6480 close (fd);
6481 return ret;
6482 }
6483
6484 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6485 struct target_loadseg
6486 {
6487 /* Core address to which the segment is mapped. */
6488 Elf32_Addr addr;
6489 /* VMA recorded in the program header. */
6490 Elf32_Addr p_vaddr;
6491 /* Size of this segment in memory. */
6492 Elf32_Word p_memsz;
6493 };
6494
6495 # if defined PT_GETDSBT
6496 struct target_loadmap
6497 {
6498 /* Protocol version number, must be zero. */
6499 Elf32_Word version;
6500 /* Pointer to the DSBT table, its size, and the DSBT index. */
6501 unsigned *dsbt_table;
6502 unsigned dsbt_size, dsbt_index;
6503 /* Number of segments in this map. */
6504 Elf32_Word nsegs;
6505 /* The actual memory map. */
6506 struct target_loadseg segs[/*nsegs*/];
6507 };
6508 # define LINUX_LOADMAP PT_GETDSBT
6509 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6510 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6511 # else
6512 struct target_loadmap
6513 {
6514 /* Protocol version number, must be zero. */
6515 Elf32_Half version;
6516 /* Number of segments in this map. */
6517 Elf32_Half nsegs;
6518 /* The actual memory map. */
6519 struct target_loadseg segs[/*nsegs*/];
6520 };
6521 # define LINUX_LOADMAP PTRACE_GETFDPIC
6522 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6523 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6524 # endif
6525
6526 static int
6527 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6528 unsigned char *myaddr, unsigned int len)
6529 {
6530 int pid = lwpid_of (current_thread);
6531 int addr = -1;
6532 struct target_loadmap *data = NULL;
6533 unsigned int actual_length, copy_length;
6534
6535 if (strcmp (annex, "exec") == 0)
6536 addr = (int) LINUX_LOADMAP_EXEC;
6537 else if (strcmp (annex, "interp") == 0)
6538 addr = (int) LINUX_LOADMAP_INTERP;
6539 else
6540 return -1;
6541
6542 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6543 return -1;
6544
6545 if (data == NULL)
6546 return -1;
6547
6548 actual_length = sizeof (struct target_loadmap)
6549 + sizeof (struct target_loadseg) * data->nsegs;
6550
6551 if (offset < 0 || offset > actual_length)
6552 return -1;
6553
6554 copy_length = actual_length - offset < len ? actual_length - offset : len;
6555 memcpy (myaddr, (char *) data + offset, copy_length);
6556 return copy_length;
6557 }
6558 #else
6559 # define linux_read_loadmap NULL
6560 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6561
6562 static void
6563 linux_process_qsupported (char **features, int count)
6564 {
6565 if (the_low_target.process_qsupported != NULL)
6566 the_low_target.process_qsupported (features, count);
6567 }
6568
6569 static int
6570 linux_supports_catch_syscall (void)
6571 {
6572 return (the_low_target.get_syscall_trapinfo != NULL
6573 && linux_supports_tracesysgood ());
6574 }
6575
6576 static int
6577 linux_get_ipa_tdesc_idx (void)
6578 {
6579 if (the_low_target.get_ipa_tdesc_idx == NULL)
6580 return 0;
6581
6582 return (*the_low_target.get_ipa_tdesc_idx) ();
6583 }
6584
6585 static int
6586 linux_supports_tracepoints (void)
6587 {
6588 if (*the_low_target.supports_tracepoints == NULL)
6589 return 0;
6590
6591 return (*the_low_target.supports_tracepoints) ();
6592 }
6593
6594 static CORE_ADDR
6595 linux_read_pc (struct regcache *regcache)
6596 {
6597 if (the_low_target.get_pc == NULL)
6598 return 0;
6599
6600 return (*the_low_target.get_pc) (regcache);
6601 }
6602
6603 static void
6604 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6605 {
6606 gdb_assert (the_low_target.set_pc != NULL);
6607
6608 (*the_low_target.set_pc) (regcache, pc);
6609 }
6610
6611 static int
6612 linux_thread_stopped (struct thread_info *thread)
6613 {
6614 return get_thread_lwp (thread)->stopped;
6615 }
6616
6617 /* This exposes stop-all-threads functionality to other modules. */
6618
6619 static void
6620 linux_pause_all (int freeze)
6621 {
6622 stop_all_lwps (freeze, NULL);
6623 }
6624
6625 /* This exposes unstop-all-threads functionality to other gdbserver
6626 modules. */
6627
6628 static void
6629 linux_unpause_all (int unfreeze)
6630 {
6631 unstop_all_lwps (unfreeze, NULL);
6632 }
6633
6634 static int
6635 linux_prepare_to_access_memory (void)
6636 {
6637 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6638 running LWP. */
6639 if (non_stop)
6640 linux_pause_all (1);
6641 return 0;
6642 }
6643
6644 static void
6645 linux_done_accessing_memory (void)
6646 {
6647 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6648 running LWP. */
6649 if (non_stop)
6650 linux_unpause_all (1);
6651 }
6652
6653 static int
6654 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6655 CORE_ADDR collector,
6656 CORE_ADDR lockaddr,
6657 ULONGEST orig_size,
6658 CORE_ADDR *jump_entry,
6659 CORE_ADDR *trampoline,
6660 ULONGEST *trampoline_size,
6661 unsigned char *jjump_pad_insn,
6662 ULONGEST *jjump_pad_insn_size,
6663 CORE_ADDR *adjusted_insn_addr,
6664 CORE_ADDR *adjusted_insn_addr_end,
6665 char *err)
6666 {
6667 return (*the_low_target.install_fast_tracepoint_jump_pad)
6668 (tpoint, tpaddr, collector, lockaddr, orig_size,
6669 jump_entry, trampoline, trampoline_size,
6670 jjump_pad_insn, jjump_pad_insn_size,
6671 adjusted_insn_addr, adjusted_insn_addr_end,
6672 err);
6673 }
6674
6675 static struct emit_ops *
6676 linux_emit_ops (void)
6677 {
6678 if (the_low_target.emit_ops != NULL)
6679 return (*the_low_target.emit_ops) ();
6680 else
6681 return NULL;
6682 }
6683
6684 static int
6685 linux_get_min_fast_tracepoint_insn_len (void)
6686 {
6687 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6688 }
6689
6690 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6691
6692 static int
6693 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6694 CORE_ADDR *phdr_memaddr, int *num_phdr)
6695 {
6696 char filename[PATH_MAX];
6697 int fd;
6698 const int auxv_size = is_elf64
6699 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6700 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6701
6702 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6703
6704 fd = open (filename, O_RDONLY);
6705 if (fd < 0)
6706 return 1;
6707
6708 *phdr_memaddr = 0;
6709 *num_phdr = 0;
6710 while (read (fd, buf, auxv_size) == auxv_size
6711 && (*phdr_memaddr == 0 || *num_phdr == 0))
6712 {
6713 if (is_elf64)
6714 {
6715 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6716
6717 switch (aux->a_type)
6718 {
6719 case AT_PHDR:
6720 *phdr_memaddr = aux->a_un.a_val;
6721 break;
6722 case AT_PHNUM:
6723 *num_phdr = aux->a_un.a_val;
6724 break;
6725 }
6726 }
6727 else
6728 {
6729 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6730
6731 switch (aux->a_type)
6732 {
6733 case AT_PHDR:
6734 *phdr_memaddr = aux->a_un.a_val;
6735 break;
6736 case AT_PHNUM:
6737 *num_phdr = aux->a_un.a_val;
6738 break;
6739 }
6740 }
6741 }
6742
6743 close (fd);
6744
6745 if (*phdr_memaddr == 0 || *num_phdr == 0)
6746 {
6747 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6748 "phdr_memaddr = %ld, phdr_num = %d",
6749 (long) *phdr_memaddr, *num_phdr);
6750 return 2;
6751 }
6752
6753 return 0;
6754 }
6755
6756 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6757
6758 static CORE_ADDR
6759 get_dynamic (const int pid, const int is_elf64)
6760 {
6761 CORE_ADDR phdr_memaddr, relocation;
6762 int num_phdr, i;
6763 unsigned char *phdr_buf;
6764 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6765
6766 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6767 return 0;
6768
6769 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6770 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6771
6772 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6773 return 0;
6774
6775 /* Compute relocation: it is expected to be 0 for "regular" executables,
6776 non-zero for PIE ones. */
6777 relocation = -1;
6778 for (i = 0; relocation == -1 && i < num_phdr; i++)
6779 if (is_elf64)
6780 {
6781 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6782
6783 if (p->p_type == PT_PHDR)
6784 relocation = phdr_memaddr - p->p_vaddr;
6785 }
6786 else
6787 {
6788 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6789
6790 if (p->p_type == PT_PHDR)
6791 relocation = phdr_memaddr - p->p_vaddr;
6792 }
6793
6794 if (relocation == -1)
6795 {
6796 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6797 real-world executables, including PIE executables, always have
6798 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6799 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6800 provides DT_DEBUG anyway (fpc binaries are statically linked).
6801
6802 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6803
6804 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6805
6806 return 0;
6807 }
6808
6809 for (i = 0; i < num_phdr; i++)
6810 {
6811 if (is_elf64)
6812 {
6813 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6814
6815 if (p->p_type == PT_DYNAMIC)
6816 return p->p_vaddr + relocation;
6817 }
6818 else
6819 {
6820 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6821
6822 if (p->p_type == PT_DYNAMIC)
6823 return p->p_vaddr + relocation;
6824 }
6825 }
6826
6827 return 0;
6828 }
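
/* A worked example of the relocation computed above, with made-up
   addresses: if the program headers sit at 0x555555554040 and the
   PT_PHDR entry records p_vaddr 0x40, the load bias is
   0x555555554000; a PT_DYNAMIC p_vaddr of 0x2d9c0 then gives
   &_DYNAMIC == 0x5555555819c0.  For a non-PIE executable the bias is
   simply 0.  */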
6829
6830 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6831 can be 0 if the inferior does not yet have the library list initialized.
6832 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6833 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6834
6835 static CORE_ADDR
6836 get_r_debug (const int pid, const int is_elf64)
6837 {
6838 CORE_ADDR dynamic_memaddr;
6839 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6840 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6841 CORE_ADDR map = -1;
6842
6843 dynamic_memaddr = get_dynamic (pid, is_elf64);
6844 if (dynamic_memaddr == 0)
6845 return map;
6846
6847 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6848 {
6849 if (is_elf64)
6850 {
6851 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6852 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6853 union
6854 {
6855 Elf64_Xword map;
6856 unsigned char buf[sizeof (Elf64_Xword)];
6857 }
6858 rld_map;
6859 #endif
6860 #ifdef DT_MIPS_RLD_MAP
6861 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6862 {
6863 if (linux_read_memory (dyn->d_un.d_val,
6864 rld_map.buf, sizeof (rld_map.buf)) == 0)
6865 return rld_map.map;
6866 else
6867 break;
6868 }
6869 #endif /* DT_MIPS_RLD_MAP */
6870 #ifdef DT_MIPS_RLD_MAP_REL
6871 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6872 {
6873 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6874 rld_map.buf, sizeof (rld_map.buf)) == 0)
6875 return rld_map.map;
6876 else
6877 break;
6878 }
6879 #endif /* DT_MIPS_RLD_MAP_REL */
6880
6881 if (dyn->d_tag == DT_DEBUG && map == -1)
6882 map = dyn->d_un.d_val;
6883
6884 if (dyn->d_tag == DT_NULL)
6885 break;
6886 }
6887 else
6888 {
6889 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6890 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6891 union
6892 {
6893 Elf32_Word map;
6894 unsigned char buf[sizeof (Elf32_Word)];
6895 }
6896 rld_map;
6897 #endif
6898 #ifdef DT_MIPS_RLD_MAP
6899 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6900 {
6901 if (linux_read_memory (dyn->d_un.d_val,
6902 rld_map.buf, sizeof (rld_map.buf)) == 0)
6903 return rld_map.map;
6904 else
6905 break;
6906 }
6907 #endif /* DT_MIPS_RLD_MAP */
6908 #ifdef DT_MIPS_RLD_MAP_REL
6909 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6910 {
6911 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6912 rld_map.buf, sizeof (rld_map.buf)) == 0)
6913 return rld_map.map;
6914 else
6915 break;
6916 }
6917 #endif /* DT_MIPS_RLD_MAP_REL */
6918
6919 if (dyn->d_tag == DT_DEBUG && map == -1)
6920 map = dyn->d_un.d_val;
6921
6922 if (dyn->d_tag == DT_NULL)
6923 break;
6924 }
6925
6926 dynamic_memaddr += dyn_size;
6927 }
6928
6929 return map;
6930 }
6931
6932 /* Read one pointer from MEMADDR in the inferior. */
6933
6934 static int
6935 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6936 {
6937 int ret;
6938
6939 /* Go through a union so this works on either big or little endian
6940 hosts, when the inferior's pointer size is smaller than the size
6941 of CORE_ADDR. It is assumed the inferior's endianness is the
6942 same as the superior's. */
6943 union
6944 {
6945 CORE_ADDR core_addr;
6946 unsigned int ui;
6947 unsigned char uc;
6948 } addr;
6949
6950 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6951 if (ret == 0)
6952 {
6953 if (ptr_size == sizeof (CORE_ADDR))
6954 *ptr = addr.core_addr;
6955 else if (ptr_size == sizeof (unsigned int))
6956 *ptr = addr.ui;
6957 else
6958 gdb_assert_not_reached ("unhandled pointer size");
6959 }
6960 return ret;
6961 }
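
/* Example: with a 64-bit gdbserver reading a pointer from a 32-bit
   inferior, PTR_SIZE is 4, so only ADDR.UI is filled in and the
   value is widened through the union.  Casting the raw bytes to
   CORE_ADDR instead would pick up unread bytes and misplace the
   value on big-endian hosts, which is what the union avoids.  */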
6962
6963 struct link_map_offsets
6964 {
6965 /* Offset and size of r_debug.r_version. */
6966 int r_version_offset;
6967
6968 /* Offset and size of r_debug.r_map. */
6969 int r_map_offset;
6970
6971 /* Offset to l_addr field in struct link_map. */
6972 int l_addr_offset;
6973
6974 /* Offset to l_name field in struct link_map. */
6975 int l_name_offset;
6976
6977 /* Offset to l_ld field in struct link_map. */
6978 int l_ld_offset;
6979
6980 /* Offset to l_next field in struct link_map. */
6981 int l_next_offset;
6982
6983 /* Offset to l_prev field in struct link_map. */
6984 int l_prev_offset;
6985 };
6986
6987 /* Construct qXfer:libraries-svr4:read reply. */
6988
6989 static int
6990 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6991 unsigned const char *writebuf,
6992 CORE_ADDR offset, int len)
6993 {
6994 struct process_info_private *const priv = current_process ()->priv;
6995 char filename[PATH_MAX];
6996 int pid, is_elf64;
6997
6998 static const struct link_map_offsets lmo_32bit_offsets =
6999 {
7000 0, /* r_version offset. */
7001 4, /* r_debug.r_map offset. */
7002 0, /* l_addr offset in link_map. */
7003 4, /* l_name offset in link_map. */
7004 8, /* l_ld offset in link_map. */
7005 12, /* l_next offset in link_map. */
7006 16 /* l_prev offset in link_map. */
7007 };
7008
7009 static const struct link_map_offsets lmo_64bit_offsets =
7010 {
7011 0, /* r_version offset. */
7012 8, /* r_debug.r_map offset. */
7013 0, /* l_addr offset in link_map. */
7014 8, /* l_name offset in link_map. */
7015 16, /* l_ld offset in link_map. */
7016 24, /* l_next offset in link_map. */
7017 32 /* l_prev offset in link_map. */
7018 };
7019 const struct link_map_offsets *lmo;
7020 unsigned int machine;
7021 int ptr_size;
7022 CORE_ADDR lm_addr = 0, lm_prev = 0;
7023 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7024 int header_done = 0;
7025
7026 if (writebuf != NULL)
7027 return -2;
7028 if (readbuf == NULL)
7029 return -1;
7030
7031 pid = lwpid_of (current_thread);
7032 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7033 is_elf64 = elf_64_file_p (filename, &machine);
7034 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7035 ptr_size = is_elf64 ? 8 : 4;
7036
7037 while (annex[0] != '\0')
7038 {
7039 const char *sep;
7040 CORE_ADDR *addrp;
7041 int name_len;
7042
7043 sep = strchr (annex, '=');
7044 if (sep == NULL)
7045 break;
7046
7047 name_len = sep - annex;
7048 if (name_len == 5 && startswith (annex, "start"))
7049 addrp = &lm_addr;
7050 else if (name_len == 4 && startswith (annex, "prev"))
7051 addrp = &lm_prev;
7052 else
7053 {
7054 annex = strchr (sep, ';');
7055 if (annex == NULL)
7056 break;
7057 annex++;
7058 continue;
7059 }
7060
7061 annex = decode_address_to_semicolon (addrp, sep + 1);
7062 }
7063
7064 if (lm_addr == 0)
7065 {
7066 int r_version = 0;
7067
7068 if (priv->r_debug == 0)
7069 priv->r_debug = get_r_debug (pid, is_elf64);
7070
7071 /* We failed to find DT_DEBUG. That situation will not change
7072 for this inferior - do not retry it. Report it to GDB as
7073 E01; see GDB's solib-svr4.c for the reasons. */
7074 if (priv->r_debug == (CORE_ADDR) -1)
7075 return -1;
7076
7077 if (priv->r_debug != 0)
7078 {
7079 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7080 (unsigned char *) &r_version,
7081 sizeof (r_version)) != 0
7082 || r_version != 1)
7083 {
7084 warning ("unexpected r_debug version %d", r_version);
7085 }
7086 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7087 &lm_addr, ptr_size) != 0)
7088 {
7089 warning ("unable to read r_map from 0x%lx",
7090 (long) priv->r_debug + lmo->r_map_offset);
7091 }
7092 }
7093 }
7094
7095 std::string document = "<library-list-svr4 version=\"1.0\"";
7096
7097 while (lm_addr
7098 && read_one_ptr (lm_addr + lmo->l_name_offset,
7099 &l_name, ptr_size) == 0
7100 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7101 &l_addr, ptr_size) == 0
7102 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7103 &l_ld, ptr_size) == 0
7104 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7105 &l_prev, ptr_size) == 0
7106 && read_one_ptr (lm_addr + lmo->l_next_offset,
7107 &l_next, ptr_size) == 0)
7108 {
7109 unsigned char libname[PATH_MAX];
7110
7111 if (lm_prev != l_prev)
7112 {
7113 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7114 (long) lm_prev, (long) l_prev);
7115 break;
7116 }
7117
7118 /* Ignore the first entry even if it has a valid name, as it
7119 corresponds to the main executable. The first entry should not be
7120 skipped if the dynamic loader was loaded late by a static executable
7121 (see the solib-svr4.c parameter ignore_first). But in that case the
7122 main executable has no PT_DYNAMIC present, and this function has
7123 already returned above due to a failed get_r_debug. */
7124 if (lm_prev == 0)
7125 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7126 else
7127 {
7128 /* Not checking for error because reading may stop before
7129 we've got PATH_MAX worth of characters. */
7130 libname[0] = '\0';
7131 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7132 libname[sizeof (libname) - 1] = '\0';
7133 if (libname[0] != '\0')
7134 {
7135 if (!header_done)
7136 {
7137 /* Terminate `<library-list-svr4'. */
7138 document += '>';
7139 header_done = 1;
7140 }
7141
7142 string_appendf (document, "<library name=\"");
7143 xml_escape_text_append (&document, (char *) libname);
7144 string_appendf (document, "\" lm=\"0x%lx\" "
7145 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7146 (unsigned long) lm_addr, (unsigned long) l_addr,
7147 (unsigned long) l_ld);
7148 }
7149 }
7150
7151 lm_prev = lm_addr;
7152 lm_addr = l_next;
7153 }
7154
7155 if (!header_done)
7156 {
7157 /* Empty list; terminate `<library-list-svr4'. */
7158 document += "/>";
7159 }
7160 else
7161 document += "</library-list-svr4>";
7162
7163 int document_len = document.length ();
7164 if (offset < document_len)
7165 document_len -= offset;
7166 else
7167 document_len = 0;
7168 if (len > document_len)
7169 len = document_len;
7170
7171 memcpy (readbuf, document.data () + offset, len);
7172
7173 return len;
7174 }
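
/* An example reply built by the loop above, with made-up addresses
   and with whitespace added here for readability (the real document
   contains no newlines between elements):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
         l_addr="0x7ffff79e2000" l_ld="0x7ffff7dcdb80"/>
     </library-list-svr4>

   An empty list is just <library-list-svr4 version="1.0"/>.  */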
7175
7176 #ifdef HAVE_LINUX_BTRACE
7177
7178 /* See to_disable_btrace target method. */
7179
7180 static int
7181 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7182 {
7183 enum btrace_error err;
7184
7185 err = linux_disable_btrace (tinfo);
7186 return (err == BTRACE_ERR_NONE ? 0 : -1);
7187 }
7188
7189 /* Encode an Intel Processor Trace configuration. */
7190
7191 static void
7192 linux_low_encode_pt_config (struct buffer *buffer,
7193 const struct btrace_data_pt_config *config)
7194 {
7195 buffer_grow_str (buffer, "<pt-config>\n");
7196
7197 switch (config->cpu.vendor)
7198 {
7199 case CV_INTEL:
7200 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7201 "model=\"%u\" stepping=\"%u\"/>\n",
7202 config->cpu.family, config->cpu.model,
7203 config->cpu.stepping);
7204 break;
7205
7206 default:
7207 break;
7208 }
7209
7210 buffer_grow_str (buffer, "</pt-config>\n");
7211 }
7212
7213 /* Encode a raw buffer. */
7214
7215 static void
7216 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7217 unsigned int size)
7218 {
7219 if (size == 0)
7220 return;
7221
7222 /* We use hex encoding - see common/rsp-low.h. */
7223 buffer_grow_str (buffer, "<raw>\n");
7224
7225 while (size-- > 0)
7226 {
7227 char elem[2];
7228
7229 elem[0] = tohex ((*data >> 4) & 0xf);
7230 elem[1] = tohex (*data++ & 0xf);
7231
7232 buffer_grow (buffer, elem, 2);
7233 }
7234
7235 buffer_grow_str (buffer, "</raw>\n");
7236 }
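
/* Example: the two bytes { 0xde, 0xad } are encoded as the four
   characters "dead" between the <raw> tags, high nibble first, so
   the encoded trace doubles the raw buffer size.  */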
7237
7238 /* See to_read_btrace target method. */
7239
7240 static int
7241 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7242 enum btrace_read_type type)
7243 {
7244 struct btrace_data btrace;
7245 struct btrace_block *block;
7246 enum btrace_error err;
7247 int i;
7248
7249 err = linux_read_btrace (&btrace, tinfo, type);
7250 if (err != BTRACE_ERR_NONE)
7251 {
7252 if (err == BTRACE_ERR_OVERFLOW)
7253 buffer_grow_str0 (buffer, "E.Overflow.");
7254 else
7255 buffer_grow_str0 (buffer, "E.Generic Error.");
7256
7257 return -1;
7258 }
7259
7260 switch (btrace.format)
7261 {
7262 case BTRACE_FORMAT_NONE:
7263 buffer_grow_str0 (buffer, "E.No Trace.");
7264 return -1;
7265
7266 case BTRACE_FORMAT_BTS:
7267 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7268 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7269
7270 for (i = 0;
7271 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7272 i++)
7273 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7274 paddress (block->begin), paddress (block->end));
7275
7276 buffer_grow_str0 (buffer, "</btrace>\n");
7277 break;
7278
7279 case BTRACE_FORMAT_PT:
7280 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7281 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7282 buffer_grow_str (buffer, "<pt>\n");
7283
7284 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7285
7286 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7287 btrace.variant.pt.size);
7288
7289 buffer_grow_str (buffer, "</pt>\n");
7290 buffer_grow_str0 (buffer, "</btrace>\n");
7291 break;
7292
7293 default:
7294 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7295 return -1;
7296 }
7297
7298 return 0;
7299 }
7300
7301 /* See to_btrace_conf target method. */
7302
7303 static int
7304 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7305 struct buffer *buffer)
7306 {
7307 const struct btrace_config *conf;
7308
7309 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7310 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7311
7312 conf = linux_btrace_conf (tinfo);
7313 if (conf != NULL)
7314 {
7315 switch (conf->format)
7316 {
7317 case BTRACE_FORMAT_NONE:
7318 break;
7319
7320 case BTRACE_FORMAT_BTS:
7321 buffer_xml_printf (buffer, "<bts");
7322 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7323 buffer_xml_printf (buffer, " />\n");
7324 break;
7325
7326 case BTRACE_FORMAT_PT:
7327 buffer_xml_printf (buffer, "<pt");
7328 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7329 buffer_xml_printf (buffer, "/>\n");
7330 break;
7331 }
7332 }
7333
7334 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7335 return 0;
7336 }
7337 #endif /* HAVE_LINUX_BTRACE */
7338
7339 /* See nat/linux-nat.h. */
7340
7341 ptid_t
7342 current_lwp_ptid (void)
7343 {
7344 return ptid_of (current_thread);
7345 }
7346
7347 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7348
7349 static int
7350 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7351 {
7352 if (the_low_target.breakpoint_kind_from_pc != NULL)
7353 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7354 else
7355 return default_breakpoint_kind_from_pc (pcptr);
7356 }
7357
7358 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7359
7360 static const gdb_byte *
7361 linux_sw_breakpoint_from_kind (int kind, int *size)
7362 {
7363 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7364
7365 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7366 }
7367
7368 /* Implementation of the target_ops method
7369 "breakpoint_kind_from_current_state". */
7370
7371 static int
7372 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7373 {
7374 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7375 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7376 else
7377 return linux_breakpoint_kind_from_pc (pcptr);
7378 }
7379
7380 /* Default implementation of linux_target_ops method "set_pc" for
7381 32-bit pc register which is literally named "pc". */
7382
7383 void
7384 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7385 {
7386 uint32_t newpc = pc;
7387
7388 supply_register_by_name (regcache, "pc", &newpc);
7389 }
7390
7391 /* Default implementation of linux_target_ops method "get_pc" for
7392 32-bit pc register which is literally named "pc". */
7393
7394 CORE_ADDR
7395 linux_get_pc_32bit (struct regcache *regcache)
7396 {
7397 uint32_t pc;
7398
7399 collect_register_by_name (regcache, "pc", &pc);
7400 if (debug_threads)
7401 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7402 return pc;
7403 }
7404
7405 /* Default implementation of linux_target_ops method "set_pc" for
7406 64-bit pc register which is literally named "pc". */
7407
7408 void
7409 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7410 {
7411 uint64_t newpc = pc;
7412
7413 supply_register_by_name (regcache, "pc", &newpc);
7414 }
7415
7416 /* Default implementation of linux_target_ops method "get_pc" for
7417 64-bit pc register which is literally named "pc". */
7418
7419 CORE_ADDR
7420 linux_get_pc_64bit (struct regcache *regcache)
7421 {
7422 uint64_t pc;
7423
7424 collect_register_by_name (regcache, "pc", &pc);
7425 if (debug_threads)
7426 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7427 return pc;
7428 }
7429
7430 /* See linux-low.h. */
7431
7432 int
7433 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7434 {
7435 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7436 int offset = 0;
7437
7438 gdb_assert (wordsize == 4 || wordsize == 8);
7439
7440 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
7441 {
7442 if (wordsize == 4)
7443 {
7444 uint32_t *data_p = (uint32_t *) data;
7445 if (data_p[0] == match)
7446 {
7447 *valp = data_p[1];
7448 return 1;
7449 }
7450 }
7451 else
7452 {
7453 uint64_t *data_p = (uint64_t *) data;
7454 if (data_p[0] == match)
7455 {
7456 *valp = data_p[1];
7457 return 1;
7458 }
7459 }
7460
7461 offset += 2 * wordsize;
7462 }
7463
7464 return 0;
7465 }
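
/* A minimal sketch of using linux_get_auxv from an architecture
   backend, e.g. to locate the inferior's program header table via
   AT_PHDR.  The function name is hypothetical; WORDSIZE is assumed
   to match the inferior and normally comes from the target
   description.  */

static CORE_ADDR
example_get_at_phdr (int wordsize)
{
  CORE_ADDR phdr = 0;

  if (linux_get_auxv (wordsize, AT_PHDR, &phdr))
    return phdr;
  return 0;
}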
7466
7467 /* See linux-low.h. */
7468
7469 CORE_ADDR
7470 linux_get_hwcap (int wordsize)
7471 {
7472 CORE_ADDR hwcap = 0;
7473 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7474 return hwcap;
7475 }
7476
7477 /* See linux-low.h. */
7478
7479 CORE_ADDR
7480 linux_get_hwcap2 (int wordsize)
7481 {
7482 CORE_ADDR hwcap2 = 0;
7483 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7484 return hwcap2;
7485 }
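
/* A minimal sketch of testing a hardware capability bit, assuming
   FEATURE_MASK holds one of the architecture's HWCAP_* constants.
   The function name is hypothetical.  */

static int
example_has_hwcap_feature (int wordsize, CORE_ADDR feature_mask)
{
  return (linux_get_hwcap (wordsize) & feature_mask) != 0;
}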
7486
7487 static struct target_ops linux_target_ops = {
7488 linux_create_inferior,
7489 linux_post_create_inferior,
7490 linux_attach,
7491 linux_kill,
7492 linux_detach,
7493 linux_mourn,
7494 linux_join,
7495 linux_thread_alive,
7496 linux_resume,
7497 linux_wait,
7498 linux_fetch_registers,
7499 linux_store_registers,
7500 linux_prepare_to_access_memory,
7501 linux_done_accessing_memory,
7502 linux_read_memory,
7503 linux_write_memory,
7504 linux_look_up_symbols,
7505 linux_request_interrupt,
7506 linux_read_auxv,
7507 linux_supports_z_point_type,
7508 linux_insert_point,
7509 linux_remove_point,
7510 linux_stopped_by_sw_breakpoint,
7511 linux_supports_stopped_by_sw_breakpoint,
7512 linux_stopped_by_hw_breakpoint,
7513 linux_supports_stopped_by_hw_breakpoint,
7514 linux_supports_hardware_single_step,
7515 linux_stopped_by_watchpoint,
7516 linux_stopped_data_address,
7517 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7518 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7519 && defined(PT_TEXT_END_ADDR)
7520 linux_read_offsets,
7521 #else
7522 NULL,
7523 #endif
7524 #ifdef USE_THREAD_DB
7525 thread_db_get_tls_address,
7526 #else
7527 NULL,
7528 #endif
7529 linux_qxfer_spu,
7530 hostio_last_error_from_errno,
7531 linux_qxfer_osdata,
7532 linux_xfer_siginfo,
7533 linux_supports_non_stop,
7534 linux_async,
7535 linux_start_non_stop,
7536 linux_supports_multi_process,
7537 linux_supports_fork_events,
7538 linux_supports_vfork_events,
7539 linux_supports_exec_events,
7540 linux_handle_new_gdb_connection,
7541 #ifdef USE_THREAD_DB
7542 thread_db_handle_monitor_command,
7543 #else
7544 NULL,
7545 #endif
7546 linux_common_core_of_thread,
7547 linux_read_loadmap,
7548 linux_process_qsupported,
7549 linux_supports_tracepoints,
7550 linux_read_pc,
7551 linux_write_pc,
7552 linux_thread_stopped,
7553 NULL,
7554 linux_pause_all,
7555 linux_unpause_all,
7556 linux_stabilize_threads,
7557 linux_install_fast_tracepoint_jump_pad,
7558 linux_emit_ops,
7559 linux_supports_disable_randomization,
7560 linux_get_min_fast_tracepoint_insn_len,
7561 linux_qxfer_libraries_svr4,
7562 linux_supports_agent,
7563 #ifdef HAVE_LINUX_BTRACE
7564 linux_enable_btrace,
7565 linux_low_disable_btrace,
7566 linux_low_read_btrace,
7567 linux_low_btrace_conf,
7568 #else
7569 NULL,
7570 NULL,
7571 NULL,
7572 NULL,
7573 #endif
7574 linux_supports_range_stepping,
7575 linux_proc_pid_to_exec_file,
7576 linux_mntns_open_cloexec,
7577 linux_mntns_unlink,
7578 linux_mntns_readlink,
7579 linux_breakpoint_kind_from_pc,
7580 linux_sw_breakpoint_from_kind,
7581 linux_proc_tid_get_name,
7582 linux_breakpoint_kind_from_current_state,
7583 linux_supports_software_single_step,
7584 linux_supports_catch_syscall,
7585 linux_get_ipa_tdesc_idx,
7586 #if USE_THREAD_DB
7587 thread_db_thread_handle,
7588 #else
7589 NULL,
7590 #endif
7591 };
7592
7593 #ifdef HAVE_LINUX_REGSETS
7594 void
7595 initialize_regsets_info (struct regsets_info *info)
7596 {
7597 for (info->num_regsets = 0;
7598 info->regsets[info->num_regsets].size >= 0;
7599 info->num_regsets++)
7600 ;
7601 }
7602 #endif
7603
7604 void
7605 initialize_low (void)
7606 {
7607 struct sigaction sigchld_action;
7608
7609 memset (&sigchld_action, 0, sizeof (sigchld_action));
7610 set_target_ops (&linux_target_ops);
7611
7612 linux_ptrace_init_warnings ();
7613 linux_proc_init_warnings ();
7614
7615 sigchld_action.sa_handler = sigchld_handler;
7616 sigemptyset (&sigchld_action.sa_mask);
7617 sigchld_action.sa_flags = SA_RESTART;
7618 sigaction (SIGCHLD, &sigchld_action, NULL);
7619
7620 initialize_low_arch ();
7621
7622 linux_check_ptrace_features ();
7623 }