AArch64: gdbserver: read pauth registers
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2019 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "common/agent.h"
23 #include "tdesc.h"
24 #include "common/rsp-low.h"
25 #include "common/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "common/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "common/filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #include "common/common-inferior.h"
51 #include "nat/fork-inferior.h"
52 #include "common/environ.h"
53 #include "common/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifndef SPUFS_MAGIC
64 #define SPUFS_MAGIC 0x23c9b64e
65 #endif
66
67 #ifdef HAVE_PERSONALITY
68 # include <sys/personality.h>
69 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
70 # define ADDR_NO_RANDOMIZE 0x0040000
71 # endif
72 #endif
73
74 #ifndef O_LARGEFILE
75 #define O_LARGEFILE 0
76 #endif
77
78 /* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81 #if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84 #if defined(__mcoldfire__)
85 /* These are still undefined in 3.10 kernels. */
86 #define PT_TEXT_ADDR 49*4
87 #define PT_DATA_ADDR 50*4
88 #define PT_TEXT_END_ADDR 51*4
89 /* BFIN already defines these since at least 2.6.32 kernels. */
90 #elif defined(BFIN)
91 #define PT_TEXT_ADDR 220
92 #define PT_TEXT_END_ADDR 224
93 #define PT_DATA_ADDR 228
94 /* These are still undefined in 3.10 kernels. */
95 #elif defined(__TMS320C6X__)
96 #define PT_TEXT_ADDR (0x10000*4)
97 #define PT_DATA_ADDR (0x10004*4)
98 #define PT_TEXT_END_ADDR (0x10008*4)
99 #endif
100 #endif
101
102 #ifdef HAVE_LINUX_BTRACE
103 # include "nat/linux-btrace.h"
104 # include "common/btrace-common.h"
105 #endif
106
107 #ifndef HAVE_ELF32_AUXV_T
108 /* Copied from glibc's elf.h. */
109 typedef struct
110 {
111 uint32_t a_type; /* Entry type */
112 union
113 {
114 uint32_t a_val; /* Integer value */
115 /* We use to have pointer elements added here. We cannot do that,
116 though, since it does not work when using 32-bit definitions
117 on 64-bit platforms and vice versa. */
118 } a_un;
119 } Elf32_auxv_t;
120 #endif
121
122 #ifndef HAVE_ELF64_AUXV_T
123 /* Copied from glibc's elf.h. */
124 typedef struct
125 {
126 uint64_t a_type; /* Entry type */
127 union
128 {
129 uint64_t a_val; /* Integer value */
130 /* We use to have pointer elements added here. We cannot do that,
131 though, since it does not work when using 32-bit definitions
132 on 64-bit platforms and vice versa. */
133 } a_un;
134 } Elf64_auxv_t;
135 #endif
136
137 /* Does the current host support PTRACE_GETREGSET? */
138 int have_ptrace_getregset = -1;
139
140 /* LWP accessors. */
141
142 /* See nat/linux-nat.h. */
143
144 ptid_t
145 ptid_of_lwp (struct lwp_info *lwp)
146 {
147 return ptid_of (get_lwp_thread (lwp));
148 }
149
150 /* See nat/linux-nat.h. */
151
152 void
153 lwp_set_arch_private_info (struct lwp_info *lwp,
154 struct arch_lwp_info *info)
155 {
156 lwp->arch_private = info;
157 }
158
159 /* See nat/linux-nat.h. */
160
161 struct arch_lwp_info *
162 lwp_arch_private_info (struct lwp_info *lwp)
163 {
164 return lwp->arch_private;
165 }
166
167 /* See nat/linux-nat.h. */
168
169 int
170 lwp_is_stopped (struct lwp_info *lwp)
171 {
172 return lwp->stopped;
173 }
174
175 /* See nat/linux-nat.h. */
176
177 enum target_stop_reason
178 lwp_stop_reason (struct lwp_info *lwp)
179 {
180 return lwp->stop_reason;
181 }
182
183 /* See nat/linux-nat.h. */
184
185 int
186 lwp_is_stepping (struct lwp_info *lwp)
187 {
188 return lwp->stepping;
189 }
190
191 /* A list of all unknown processes which receive stop signals. Some
192 other process will presumably claim each of these as forked
193 children momentarily. */
194
195 struct simple_pid_list
196 {
197 /* The process ID. */
198 int pid;
199
200 /* The status as reported by waitpid. */
201 int status;
202
203 /* Next in chain. */
204 struct simple_pid_list *next;
205 };
206 struct simple_pid_list *stopped_pids;
207
208 /* Trivial list manipulation functions to keep track of a list of new
209 stopped processes. */
210
211 static void
212 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
213 {
214 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
215
216 new_pid->pid = pid;
217 new_pid->status = status;
218 new_pid->next = *listp;
219 *listp = new_pid;
220 }
221
222 static int
223 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
224 {
225 struct simple_pid_list **p;
226
227 for (p = listp; *p != NULL; p = &(*p)->next)
228 if ((*p)->pid == pid)
229 {
230 struct simple_pid_list *next = (*p)->next;
231
232 *statusp = (*p)->status;
233 xfree (*p);
234 *p = next;
235 return 1;
236 }
237 return 0;
238 }
239
240 enum stopping_threads_kind
241 {
242 /* Not stopping threads presently. */
243 NOT_STOPPING_THREADS,
244
245 /* Stopping threads. */
246 STOPPING_THREADS,
247
248 /* Stopping and suspending threads. */
249 STOPPING_AND_SUSPENDING_THREADS
250 };
251
252 /* This is set while stop_all_lwps is in effect. */
253 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
254
255 /* FIXME make into a target method? */
256 int using_threads = 1;
257
258 /* True if we're presently stabilizing threads (moving them out of
259 jump pads). */
260 static int stabilizing_threads;
261
262 static void linux_resume_one_lwp (struct lwp_info *lwp,
263 int step, int signal, siginfo_t *info);
264 static void linux_resume (struct thread_resume *resume_info, size_t n);
265 static void stop_all_lwps (int suspend, struct lwp_info *except);
266 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
267 static void unsuspend_all_lwps (struct lwp_info *except);
268 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
269 int *wstat, int options);
270 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
271 static struct lwp_info *add_lwp (ptid_t ptid);
272 static void linux_mourn (struct process_info *process);
273 static int linux_stopped_by_watchpoint (void);
274 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
275 static int lwp_is_marked_dead (struct lwp_info *lwp);
276 static void proceed_all_lwps (void);
277 static int finish_step_over (struct lwp_info *lwp);
278 static int kill_lwp (unsigned long lwpid, int signo);
279 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
280 static void complete_ongoing_step_over (void);
281 static int linux_low_ptrace_options (int attached);
282 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
283 static void proceed_one_lwp (thread_info *thread, lwp_info *except);
284
285 /* When the event-loop is doing a step-over, this points at the thread
286 being stepped. */
287 ptid_t step_over_bkpt;
288
289 /* True if the low target can hardware single-step. */
290
291 static int
292 can_hardware_single_step (void)
293 {
294 if (the_low_target.supports_hardware_single_step != NULL)
295 return the_low_target.supports_hardware_single_step ();
296 else
297 return 0;
298 }
299
300 /* True if the low target can software single-step. Such targets
301 implement the GET_NEXT_PCS callback. */
302
303 static int
304 can_software_single_step (void)
305 {
306 return (the_low_target.get_next_pcs != NULL);
307 }
308
309 /* True if the low target supports memory breakpoints. If so, we'll
310 have a GET_PC implementation. */
311
312 static int
313 supports_breakpoints (void)
314 {
315 return (the_low_target.get_pc != NULL);
316 }
317
318 /* Returns true if this target can support fast tracepoints. This
319 does not mean that the in-process agent has been loaded in the
320 inferior. */
321
322 static int
323 supports_fast_tracepoints (void)
324 {
325 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
326 }
327
328 /* True if LWP is stopped in its stepping range. */
329
330 static int
331 lwp_in_step_range (struct lwp_info *lwp)
332 {
333 CORE_ADDR pc = lwp->stop_pc;
334
335 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
336 }
337
338 struct pending_signals
339 {
340 int signal;
341 siginfo_t info;
342 struct pending_signals *prev;
343 };
344
345 /* The read/write ends of the pipe registered as waitable file in the
346 event loop. */
347 static int linux_event_pipe[2] = { -1, -1 };
348
349 /* True if we're currently in async mode. */
350 #define target_is_async_p() (linux_event_pipe[0] != -1)
351
352 static void send_sigstop (struct lwp_info *lwp);
353 static void wait_for_sigstop (void);
354
355 /* Return non-zero if HEADER is a 64-bit ELF file. */
356
357 static int
358 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
359 {
360 if (header->e_ident[EI_MAG0] == ELFMAG0
361 && header->e_ident[EI_MAG1] == ELFMAG1
362 && header->e_ident[EI_MAG2] == ELFMAG2
363 && header->e_ident[EI_MAG3] == ELFMAG3)
364 {
365 *machine = header->e_machine;
366 return header->e_ident[EI_CLASS] == ELFCLASS64;
367
368 }
369 *machine = EM_NONE;
370 return -1;
371 }
372
373 /* Return non-zero if FILE is a 64-bit ELF file,
374 zero if the file is not a 64-bit ELF file,
375 and -1 if the file is not accessible or doesn't exist. */
376
377 static int
378 elf_64_file_p (const char *file, unsigned int *machine)
379 {
380 Elf64_Ehdr header;
381 int fd;
382
383 fd = open (file, O_RDONLY);
384 if (fd < 0)
385 return -1;
386
387 if (read (fd, &header, sizeof (header)) != sizeof (header))
388 {
389 close (fd);
390 return 0;
391 }
392 close (fd);
393
394 return elf_64_header_p (&header, machine);
395 }
396
397 /* Accepts an integer PID; Returns true if the executable PID is
398 running is a 64-bit ELF file.. */
399
400 int
401 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
402 {
403 char file[PATH_MAX];
404
405 sprintf (file, "/proc/%d/exe", pid);
406 return elf_64_file_p (file, machine);
407 }
408
409 static void
410 delete_lwp (struct lwp_info *lwp)
411 {
412 struct thread_info *thr = get_lwp_thread (lwp);
413
414 if (debug_threads)
415 debug_printf ("deleting %ld\n", lwpid_of (thr));
416
417 remove_thread (thr);
418
419 if (the_low_target.delete_thread != NULL)
420 the_low_target.delete_thread (lwp->arch_private);
421 else
422 gdb_assert (lwp->arch_private == NULL);
423
424 free (lwp);
425 }
426
427 /* Add a process to the common process list, and set its private
428 data. */
429
430 static struct process_info *
431 linux_add_process (int pid, int attached)
432 {
433 struct process_info *proc;
434
435 proc = add_process (pid, attached);
436 proc->priv = XCNEW (struct process_info_private);
437
438 if (the_low_target.new_process != NULL)
439 proc->priv->arch_private = the_low_target.new_process ();
440
441 return proc;
442 }
443
444 static CORE_ADDR get_pc (struct lwp_info *lwp);
445
446 /* Call the target arch_setup function on the current thread. */
447
448 static void
449 linux_arch_setup (void)
450 {
451 the_low_target.arch_setup ();
452 }
453
454 /* Call the target arch_setup function on THREAD. */
455
456 static void
457 linux_arch_setup_thread (struct thread_info *thread)
458 {
459 struct thread_info *saved_thread;
460
461 saved_thread = current_thread;
462 current_thread = thread;
463
464 linux_arch_setup ();
465
466 current_thread = saved_thread;
467 }
468
469 /* Handle a GNU/Linux extended wait response. If we see a clone,
470 fork, or vfork event, we need to add the new LWP to our list
471 (and return 0 so as not to report the trap to higher layers).
472 If we see an exec event, we will modify ORIG_EVENT_LWP to point
473 to a new LWP representing the new program. */
474
475 static int
476 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
477 {
478 client_state &cs = get_client_state ();
479 struct lwp_info *event_lwp = *orig_event_lwp;
480 int event = linux_ptrace_get_extended_event (wstat);
481 struct thread_info *event_thr = get_lwp_thread (event_lwp);
482 struct lwp_info *new_lwp;
483
484 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
485
486 /* All extended events we currently use are mid-syscall. Only
487 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
488 you have to be using PTRACE_SEIZE to get that. */
489 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
490
491 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
492 || (event == PTRACE_EVENT_CLONE))
493 {
494 ptid_t ptid;
495 unsigned long new_pid;
496 int ret, status;
497
498 /* Get the pid of the new lwp. */
499 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
500 &new_pid);
501
502 /* If we haven't already seen the new PID stop, wait for it now. */
503 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
504 {
505 /* The new child has a pending SIGSTOP. We can't affect it until it
506 hits the SIGSTOP, but we're already attached. */
507
508 ret = my_waitpid (new_pid, &status, __WALL);
509
510 if (ret == -1)
511 perror_with_name ("waiting for new child");
512 else if (ret != new_pid)
513 warning ("wait returned unexpected PID %d", ret);
514 else if (!WIFSTOPPED (status))
515 warning ("wait returned unexpected status 0x%x", status);
516 }
517
518 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
519 {
520 struct process_info *parent_proc;
521 struct process_info *child_proc;
522 struct lwp_info *child_lwp;
523 struct thread_info *child_thr;
524 struct target_desc *tdesc;
525
526 ptid = ptid_t (new_pid, new_pid, 0);
527
528 if (debug_threads)
529 {
530 debug_printf ("HEW: Got fork event from LWP %ld, "
531 "new child is %d\n",
532 ptid_of (event_thr).lwp (),
533 ptid.pid ());
534 }
535
536 /* Add the new process to the tables and clone the breakpoint
537 lists of the parent. We need to do this even if the new process
538 will be detached, since we will need the process object and the
539 breakpoints to remove any breakpoints from memory when we
540 detach, and the client side will access registers. */
541 child_proc = linux_add_process (new_pid, 0);
542 gdb_assert (child_proc != NULL);
543 child_lwp = add_lwp (ptid);
544 gdb_assert (child_lwp != NULL);
545 child_lwp->stopped = 1;
546 child_lwp->must_set_ptrace_flags = 1;
547 child_lwp->status_pending_p = 0;
548 child_thr = get_lwp_thread (child_lwp);
549 child_thr->last_resume_kind = resume_stop;
550 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
551
552 /* If we're suspending all threads, leave this one suspended
553 too. If the fork/clone parent is stepping over a breakpoint,
554 all other threads have been suspended already. Leave the
555 child suspended too. */
556 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
557 || event_lwp->bp_reinsert != 0)
558 {
559 if (debug_threads)
560 debug_printf ("HEW: leaving child suspended\n");
561 child_lwp->suspended = 1;
562 }
563
564 parent_proc = get_thread_process (event_thr);
565 child_proc->attached = parent_proc->attached;
566
567 if (event_lwp->bp_reinsert != 0
568 && can_software_single_step ()
569 && event == PTRACE_EVENT_VFORK)
570 {
571 /* If we leave single-step breakpoints there, child will
572 hit it, so uninsert single-step breakpoints from parent
573 (and child). Once vfork child is done, reinsert
574 them back to parent. */
575 uninsert_single_step_breakpoints (event_thr);
576 }
577
578 clone_all_breakpoints (child_thr, event_thr);
579
580 tdesc = allocate_target_description ();
581 copy_target_description (tdesc, parent_proc->tdesc);
582 child_proc->tdesc = tdesc;
583
584 /* Clone arch-specific process data. */
585 if (the_low_target.new_fork != NULL)
586 the_low_target.new_fork (parent_proc, child_proc);
587
588 /* Save fork info in the parent thread. */
589 if (event == PTRACE_EVENT_FORK)
590 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
591 else if (event == PTRACE_EVENT_VFORK)
592 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
593
594 event_lwp->waitstatus.value.related_pid = ptid;
595
596 /* The status_pending field contains bits denoting the
597 extended event, so when the pending event is handled,
598 the handler will look at lwp->waitstatus. */
599 event_lwp->status_pending_p = 1;
600 event_lwp->status_pending = wstat;
601
602 /* Link the threads until the parent event is passed on to
603 higher layers. */
604 event_lwp->fork_relative = child_lwp;
605 child_lwp->fork_relative = event_lwp;
606
607 /* If the parent thread is doing step-over with single-step
608 breakpoints, the list of single-step breakpoints are cloned
609 from the parent's. Remove them from the child process.
610 In case of vfork, we'll reinsert them back once vforked
611 child is done. */
612 if (event_lwp->bp_reinsert != 0
613 && can_software_single_step ())
614 {
615 /* The child process is forked and stopped, so it is safe
616 to access its memory without stopping all other threads
617 from other processes. */
618 delete_single_step_breakpoints (child_thr);
619
620 gdb_assert (has_single_step_breakpoints (event_thr));
621 gdb_assert (!has_single_step_breakpoints (child_thr));
622 }
623
624 /* Report the event. */
625 return 0;
626 }
627
628 if (debug_threads)
629 debug_printf ("HEW: Got clone event "
630 "from LWP %ld, new child is LWP %ld\n",
631 lwpid_of (event_thr), new_pid);
632
633 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
634 new_lwp = add_lwp (ptid);
635
636 /* Either we're going to immediately resume the new thread
637 or leave it stopped. linux_resume_one_lwp is a nop if it
638 thinks the thread is currently running, so set this first
639 before calling linux_resume_one_lwp. */
640 new_lwp->stopped = 1;
641
642 /* If we're suspending all threads, leave this one suspended
643 too. If the fork/clone parent is stepping over a breakpoint,
644 all other threads have been suspended already. Leave the
645 child suspended too. */
646 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
647 || event_lwp->bp_reinsert != 0)
648 new_lwp->suspended = 1;
649
650 /* Normally we will get the pending SIGSTOP. But in some cases
651 we might get another signal delivered to the group first.
652 If we do get another signal, be sure not to lose it. */
653 if (WSTOPSIG (status) != SIGSTOP)
654 {
655 new_lwp->stop_expected = 1;
656 new_lwp->status_pending_p = 1;
657 new_lwp->status_pending = status;
658 }
659 else if (cs.report_thread_events)
660 {
661 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
662 new_lwp->status_pending_p = 1;
663 new_lwp->status_pending = status;
664 }
665
666 #ifdef USE_THREAD_DB
667 thread_db_notice_clone (event_thr, ptid);
668 #endif
669
670 /* Don't report the event. */
671 return 1;
672 }
673 else if (event == PTRACE_EVENT_VFORK_DONE)
674 {
675 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
676
677 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
678 {
679 reinsert_single_step_breakpoints (event_thr);
680
681 gdb_assert (has_single_step_breakpoints (event_thr));
682 }
683
684 /* Report the event. */
685 return 0;
686 }
687 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
688 {
689 struct process_info *proc;
690 std::vector<int> syscalls_to_catch;
691 ptid_t event_ptid;
692 pid_t event_pid;
693
694 if (debug_threads)
695 {
696 debug_printf ("HEW: Got exec event from LWP %ld\n",
697 lwpid_of (event_thr));
698 }
699
700 /* Get the event ptid. */
701 event_ptid = ptid_of (event_thr);
702 event_pid = event_ptid.pid ();
703
704 /* Save the syscall list from the execing process. */
705 proc = get_thread_process (event_thr);
706 syscalls_to_catch = std::move (proc->syscalls_to_catch);
707
708 /* Delete the execing process and all its threads. */
709 linux_mourn (proc);
710 current_thread = NULL;
711
712 /* Create a new process/lwp/thread. */
713 proc = linux_add_process (event_pid, 0);
714 event_lwp = add_lwp (event_ptid);
715 event_thr = get_lwp_thread (event_lwp);
716 gdb_assert (current_thread == event_thr);
717 linux_arch_setup_thread (event_thr);
718
719 /* Set the event status. */
720 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
721 event_lwp->waitstatus.value.execd_pathname
722 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
723
724 /* Mark the exec status as pending. */
725 event_lwp->stopped = 1;
726 event_lwp->status_pending_p = 1;
727 event_lwp->status_pending = wstat;
728 event_thr->last_resume_kind = resume_continue;
729 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
730
731 /* Update syscall state in the new lwp, effectively mid-syscall too. */
732 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
733
734 /* Restore the list to catch. Don't rely on the client, which is free
735 to avoid sending a new list when the architecture doesn't change.
736 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
737 proc->syscalls_to_catch = std::move (syscalls_to_catch);
738
739 /* Report the event. */
740 *orig_event_lwp = event_lwp;
741 return 0;
742 }
743
744 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
745 }
746
747 /* Return the PC as read from the regcache of LWP, without any
748 adjustment. */
749
750 static CORE_ADDR
751 get_pc (struct lwp_info *lwp)
752 {
753 struct thread_info *saved_thread;
754 struct regcache *regcache;
755 CORE_ADDR pc;
756
757 if (the_low_target.get_pc == NULL)
758 return 0;
759
760 saved_thread = current_thread;
761 current_thread = get_lwp_thread (lwp);
762
763 regcache = get_thread_regcache (current_thread, 1);
764 pc = (*the_low_target.get_pc) (regcache);
765
766 if (debug_threads)
767 debug_printf ("pc is 0x%lx\n", (long) pc);
768
769 current_thread = saved_thread;
770 return pc;
771 }
772
773 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
774 Fill *SYSNO with the syscall nr trapped. */
775
776 static void
777 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
778 {
779 struct thread_info *saved_thread;
780 struct regcache *regcache;
781
782 if (the_low_target.get_syscall_trapinfo == NULL)
783 {
784 /* If we cannot get the syscall trapinfo, report an unknown
785 system call number. */
786 *sysno = UNKNOWN_SYSCALL;
787 return;
788 }
789
790 saved_thread = current_thread;
791 current_thread = get_lwp_thread (lwp);
792
793 regcache = get_thread_regcache (current_thread, 1);
794 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
795
796 if (debug_threads)
797 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
798
799 current_thread = saved_thread;
800 }
801
802 static int check_stopped_by_watchpoint (struct lwp_info *child);
803
804 /* Called when the LWP stopped for a signal/trap. If it stopped for a
805 trap check what caused it (breakpoint, watchpoint, trace, etc.),
806 and save the result in the LWP's stop_reason field. If it stopped
807 for a breakpoint, decrement the PC if necessary on the lwp's
808 architecture. Returns true if we now have the LWP's stop PC. */
809
810 static int
811 save_stop_reason (struct lwp_info *lwp)
812 {
813 CORE_ADDR pc;
814 CORE_ADDR sw_breakpoint_pc;
815 struct thread_info *saved_thread;
816 #if USE_SIGTRAP_SIGINFO
817 siginfo_t siginfo;
818 #endif
819
820 if (the_low_target.get_pc == NULL)
821 return 0;
822
823 pc = get_pc (lwp);
824 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
825
826 /* breakpoint_at reads from the current thread. */
827 saved_thread = current_thread;
828 current_thread = get_lwp_thread (lwp);
829
830 #if USE_SIGTRAP_SIGINFO
831 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
832 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
833 {
834 if (siginfo.si_signo == SIGTRAP)
835 {
836 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
837 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
838 {
839 /* The si_code is ambiguous on this arch -- check debug
840 registers. */
841 if (!check_stopped_by_watchpoint (lwp))
842 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
843 }
844 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
845 {
846 /* If we determine the LWP stopped for a SW breakpoint,
847 trust it. Particularly don't check watchpoint
848 registers, because at least on s390, we'd find
849 stopped-by-watchpoint as long as there's a watchpoint
850 set. */
851 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
852 }
853 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
854 {
855 /* This can indicate either a hardware breakpoint or
856 hardware watchpoint. Check debug registers. */
857 if (!check_stopped_by_watchpoint (lwp))
858 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
859 }
860 else if (siginfo.si_code == TRAP_TRACE)
861 {
862 /* We may have single stepped an instruction that
863 triggered a watchpoint. In that case, on some
864 architectures (such as x86), instead of TRAP_HWBKPT,
865 si_code indicates TRAP_TRACE, and we need to check
866 the debug registers separately. */
867 if (!check_stopped_by_watchpoint (lwp))
868 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
869 }
870 }
871 }
872 #else
873 /* We may have just stepped a breakpoint instruction. E.g., in
874 non-stop mode, GDB first tells the thread A to step a range, and
875 then the user inserts a breakpoint inside the range. In that
876 case we need to report the breakpoint PC. */
877 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
878 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
879 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
880
881 if (hardware_breakpoint_inserted_here (pc))
882 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
883
884 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
885 check_stopped_by_watchpoint (lwp);
886 #endif
887
888 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
889 {
890 if (debug_threads)
891 {
892 struct thread_info *thr = get_lwp_thread (lwp);
893
894 debug_printf ("CSBB: %s stopped by software breakpoint\n",
895 target_pid_to_str (ptid_of (thr)));
896 }
897
898 /* Back up the PC if necessary. */
899 if (pc != sw_breakpoint_pc)
900 {
901 struct regcache *regcache
902 = get_thread_regcache (current_thread, 1);
903 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
904 }
905
906 /* Update this so we record the correct stop PC below. */
907 pc = sw_breakpoint_pc;
908 }
909 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
910 {
911 if (debug_threads)
912 {
913 struct thread_info *thr = get_lwp_thread (lwp);
914
915 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
916 target_pid_to_str (ptid_of (thr)));
917 }
918 }
919 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
920 {
921 if (debug_threads)
922 {
923 struct thread_info *thr = get_lwp_thread (lwp);
924
925 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
926 target_pid_to_str (ptid_of (thr)));
927 }
928 }
929 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
930 {
931 if (debug_threads)
932 {
933 struct thread_info *thr = get_lwp_thread (lwp);
934
935 debug_printf ("CSBB: %s stopped by trace\n",
936 target_pid_to_str (ptid_of (thr)));
937 }
938 }
939
940 lwp->stop_pc = pc;
941 current_thread = saved_thread;
942 return 1;
943 }
944
945 static struct lwp_info *
946 add_lwp (ptid_t ptid)
947 {
948 struct lwp_info *lwp;
949
950 lwp = XCNEW (struct lwp_info);
951
952 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
953
954 lwp->thread = add_thread (ptid, lwp);
955
956 if (the_low_target.new_thread != NULL)
957 the_low_target.new_thread (lwp);
958
959 return lwp;
960 }
961
962 /* Callback to be used when calling fork_inferior, responsible for
963 actually initiating the tracing of the inferior. */
964
965 static void
966 linux_ptrace_fun ()
967 {
968 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
969 (PTRACE_TYPE_ARG4) 0) < 0)
970 trace_start_error_with_name ("ptrace");
971
972 if (setpgid (0, 0) < 0)
973 trace_start_error_with_name ("setpgid");
974
975 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
976 stdout to stderr so that inferior i/o doesn't corrupt the connection.
977 Also, redirect stdin to /dev/null. */
978 if (remote_connection_is_stdio ())
979 {
980 if (close (0) < 0)
981 trace_start_error_with_name ("close");
982 if (open ("/dev/null", O_RDONLY) < 0)
983 trace_start_error_with_name ("open");
984 if (dup2 (2, 1) < 0)
985 trace_start_error_with_name ("dup2");
986 if (write (2, "stdin/stdout redirected\n",
987 sizeof ("stdin/stdout redirected\n") - 1) < 0)
988 {
989 /* Errors ignored. */;
990 }
991 }
992 }
993
994 /* Start an inferior process and returns its pid.
995 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
996 are its arguments. */
997
998 static int
999 linux_create_inferior (const char *program,
1000 const std::vector<char *> &program_args)
1001 {
1002 client_state &cs = get_client_state ();
1003 struct lwp_info *new_lwp;
1004 int pid;
1005 ptid_t ptid;
1006
1007 {
1008 maybe_disable_address_space_randomization restore_personality
1009 (cs.disable_randomization);
1010 std::string str_program_args = stringify_argv (program_args);
1011
1012 pid = fork_inferior (program,
1013 str_program_args.c_str (),
1014 get_environ ()->envp (), linux_ptrace_fun,
1015 NULL, NULL, NULL, NULL);
1016 }
1017
1018 linux_add_process (pid, 0);
1019
1020 ptid = ptid_t (pid, pid, 0);
1021 new_lwp = add_lwp (ptid);
1022 new_lwp->must_set_ptrace_flags = 1;
1023
1024 post_fork_inferior (pid, program);
1025
1026 return pid;
1027 }
1028
1029 /* Implement the post_create_inferior target_ops method. */
1030
1031 static void
1032 linux_post_create_inferior (void)
1033 {
1034 struct lwp_info *lwp = get_thread_lwp (current_thread);
1035
1036 linux_arch_setup ();
1037
1038 if (lwp->must_set_ptrace_flags)
1039 {
1040 struct process_info *proc = current_process ();
1041 int options = linux_low_ptrace_options (proc->attached);
1042
1043 linux_enable_event_reporting (lwpid_of (current_thread), options);
1044 lwp->must_set_ptrace_flags = 0;
1045 }
1046 }
1047
1048 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1049 error. */
1050
1051 int
1052 linux_attach_lwp (ptid_t ptid)
1053 {
1054 struct lwp_info *new_lwp;
1055 int lwpid = ptid.lwp ();
1056
1057 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1058 != 0)
1059 return errno;
1060
1061 new_lwp = add_lwp (ptid);
1062
1063 /* We need to wait for SIGSTOP before being able to make the next
1064 ptrace call on this LWP. */
1065 new_lwp->must_set_ptrace_flags = 1;
1066
1067 if (linux_proc_pid_is_stopped (lwpid))
1068 {
1069 if (debug_threads)
1070 debug_printf ("Attached to a stopped process\n");
1071
1072 /* The process is definitely stopped. It is in a job control
1073 stop, unless the kernel predates the TASK_STOPPED /
1074 TASK_TRACED distinction, in which case it might be in a
1075 ptrace stop. Make sure it is in a ptrace stop; from there we
1076 can kill it, signal it, et cetera.
1077
1078 First make sure there is a pending SIGSTOP. Since we are
1079 already attached, the process can not transition from stopped
1080 to running without a PTRACE_CONT; so we know this signal will
1081 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1082 probably already in the queue (unless this kernel is old
1083 enough to use TASK_STOPPED for ptrace stops); but since
1084 SIGSTOP is not an RT signal, it can only be queued once. */
1085 kill_lwp (lwpid, SIGSTOP);
1086
1087 /* Finally, resume the stopped process. This will deliver the
1088 SIGSTOP (or a higher priority signal, just like normal
1089 PTRACE_ATTACH), which we'll catch later on. */
1090 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1091 }
1092
1093 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1094 brings it to a halt.
1095
1096 There are several cases to consider here:
1097
1098 1) gdbserver has already attached to the process and is being notified
1099 of a new thread that is being created.
1100 In this case we should ignore that SIGSTOP and resume the
1101 process. This is handled below by setting stop_expected = 1,
1102 and the fact that add_thread sets last_resume_kind ==
1103 resume_continue.
1104
1105 2) This is the first thread (the process thread), and we're attaching
1106 to it via attach_inferior.
1107 In this case we want the process thread to stop.
1108 This is handled by having linux_attach set last_resume_kind ==
1109 resume_stop after we return.
1110
1111 If the pid we are attaching to is also the tgid, we attach to and
1112 stop all the existing threads. Otherwise, we attach to pid and
1113 ignore any other threads in the same group as this pid.
1114
1115 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1116 existing threads.
1117 In this case we want the thread to stop.
1118 FIXME: This case is currently not properly handled.
1119 We should wait for the SIGSTOP but don't. Things work apparently
1120 because enough time passes between when we ptrace (ATTACH) and when
1121 gdb makes the next ptrace call on the thread.
1122
1123 On the other hand, if we are currently trying to stop all threads, we
1124 should treat the new thread as if we had sent it a SIGSTOP. This works
1125 because we are guaranteed that the add_lwp call above added us to the
1126 end of the list, and so the new thread has not yet reached
1127 wait_for_sigstop (but will). */
1128 new_lwp->stop_expected = 1;
1129
1130 return 0;
1131 }
1132
1133 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1134 already attached. Returns true if a new LWP is found, false
1135 otherwise. */
1136
1137 static int
1138 attach_proc_task_lwp_callback (ptid_t ptid)
1139 {
1140 /* Is this a new thread? */
1141 if (find_thread_ptid (ptid) == NULL)
1142 {
1143 int lwpid = ptid.lwp ();
1144 int err;
1145
1146 if (debug_threads)
1147 debug_printf ("Found new lwp %d\n", lwpid);
1148
1149 err = linux_attach_lwp (ptid);
1150
1151 /* Be quiet if we simply raced with the thread exiting. EPERM
1152 is returned if the thread's task still exists, and is marked
1153 as exited or zombie, as well as other conditions, so in that
1154 case, confirm the status in /proc/PID/status. */
1155 if (err == ESRCH
1156 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1157 {
1158 if (debug_threads)
1159 {
1160 debug_printf ("Cannot attach to lwp %d: "
1161 "thread is gone (%d: %s)\n",
1162 lwpid, err, strerror (err));
1163 }
1164 }
1165 else if (err != 0)
1166 {
1167 std::string reason
1168 = linux_ptrace_attach_fail_reason_string (ptid, err);
1169
1170 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1171 }
1172
1173 return 1;
1174 }
1175 return 0;
1176 }
1177
1178 static void async_file_mark (void);
1179
1180 /* Attach to PID. If PID is the tgid, attach to it and all
1181 of its threads. */
1182
1183 static int
1184 linux_attach (unsigned long pid)
1185 {
1186 struct process_info *proc;
1187 struct thread_info *initial_thread;
1188 ptid_t ptid = ptid_t (pid, pid, 0);
1189 int err;
1190
1191 proc = linux_add_process (pid, 1);
1192
1193 /* Attach to PID. We will check for other threads
1194 soon. */
1195 err = linux_attach_lwp (ptid);
1196 if (err != 0)
1197 {
1198 remove_process (proc);
1199
1200 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1201 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1202 }
1203
1204 /* Don't ignore the initial SIGSTOP if we just attached to this
1205 process. It will be collected by wait shortly. */
1206 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
1207 initial_thread->last_resume_kind = resume_stop;
1208
1209 /* We must attach to every LWP. If /proc is mounted, use that to
1210 find them now. On the one hand, the inferior may be using raw
1211 clone instead of using pthreads. On the other hand, even if it
1212 is using pthreads, GDB may not be connected yet (thread_db needs
1213 to do symbol lookups, through qSymbol). Also, thread_db walks
1214 structures in the inferior's address space to find the list of
1215 threads/LWPs, and those structures may well be corrupted. Note
1216 that once thread_db is loaded, we'll still use it to list threads
1217 and associate pthread info with each LWP. */
1218 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1219
1220 /* GDB will shortly read the xml target description for this
1221 process, to figure out the process' architecture. But the target
1222 description is only filled in when the first process/thread in
1223 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1224 that now, otherwise, if GDB is fast enough, it could read the
1225 target description _before_ that initial stop. */
1226 if (non_stop)
1227 {
1228 struct lwp_info *lwp;
1229 int wstat, lwpid;
1230 ptid_t pid_ptid = ptid_t (pid);
1231
1232 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1233 &wstat, __WALL);
1234 gdb_assert (lwpid > 0);
1235
1236 lwp = find_lwp_pid (ptid_t (lwpid));
1237
1238 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1239 {
1240 lwp->status_pending_p = 1;
1241 lwp->status_pending = wstat;
1242 }
1243
1244 initial_thread->last_resume_kind = resume_continue;
1245
1246 async_file_mark ();
1247
1248 gdb_assert (proc->tdesc != NULL);
1249 }
1250
1251 return 0;
1252 }
1253
1254 static int
1255 last_thread_of_process_p (int pid)
1256 {
1257 bool seen_one = false;
1258
1259 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1260 {
1261 if (!seen_one)
1262 {
1263 /* This is the first thread of this process we see. */
1264 seen_one = true;
1265 return false;
1266 }
1267 else
1268 {
1269 /* This is the second thread of this process we see. */
1270 return true;
1271 }
1272 });
1273
1274 return thread == NULL;
1275 }
1276
1277 /* Kill LWP. */
1278
1279 static void
1280 linux_kill_one_lwp (struct lwp_info *lwp)
1281 {
1282 struct thread_info *thr = get_lwp_thread (lwp);
1283 int pid = lwpid_of (thr);
1284
1285 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1286 there is no signal context, and ptrace(PTRACE_KILL) (or
1287 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1288 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1289 alternative is to kill with SIGKILL. We only need one SIGKILL
1290 per process, not one for each thread. But since we still support
1291 support debugging programs using raw clone without CLONE_THREAD,
1292 we send one for each thread. For years, we used PTRACE_KILL
1293 only, so we're being a bit paranoid about some old kernels where
1294 PTRACE_KILL might work better (dubious if there are any such, but
1295 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1296 second, and so we're fine everywhere. */
1297
1298 errno = 0;
1299 kill_lwp (pid, SIGKILL);
1300 if (debug_threads)
1301 {
1302 int save_errno = errno;
1303
1304 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1305 target_pid_to_str (ptid_of (thr)),
1306 save_errno ? strerror (save_errno) : "OK");
1307 }
1308
1309 errno = 0;
1310 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1311 if (debug_threads)
1312 {
1313 int save_errno = errno;
1314
1315 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1316 target_pid_to_str (ptid_of (thr)),
1317 save_errno ? strerror (save_errno) : "OK");
1318 }
1319 }
1320
1321 /* Kill LWP and wait for it to die. */
1322
1323 static void
1324 kill_wait_lwp (struct lwp_info *lwp)
1325 {
1326 struct thread_info *thr = get_lwp_thread (lwp);
1327 int pid = ptid_of (thr).pid ();
1328 int lwpid = ptid_of (thr).lwp ();
1329 int wstat;
1330 int res;
1331
1332 if (debug_threads)
1333 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1334
1335 do
1336 {
1337 linux_kill_one_lwp (lwp);
1338
1339 /* Make sure it died. Notes:
1340
1341 - The loop is most likely unnecessary.
1342
1343 - We don't use linux_wait_for_event as that could delete lwps
1344 while we're iterating over them. We're not interested in
1345 any pending status at this point, only in making sure all
1346 wait status on the kernel side are collected until the
1347 process is reaped.
1348
1349 - We don't use __WALL here as the __WALL emulation relies on
1350 SIGCHLD, and killing a stopped process doesn't generate
1351 one, nor an exit status.
1352 */
1353 res = my_waitpid (lwpid, &wstat, 0);
1354 if (res == -1 && errno == ECHILD)
1355 res = my_waitpid (lwpid, &wstat, __WCLONE);
1356 } while (res > 0 && WIFSTOPPED (wstat));
1357
1358 /* Even if it was stopped, the child may have already disappeared.
1359 E.g., if it was killed by SIGKILL. */
1360 if (res < 0 && errno != ECHILD)
1361 perror_with_name ("kill_wait_lwp");
1362 }
1363
1364 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1365 except the leader. */
1366
1367 static void
1368 kill_one_lwp_callback (thread_info *thread, int pid)
1369 {
1370 struct lwp_info *lwp = get_thread_lwp (thread);
1371
1372 /* We avoid killing the first thread here, because of a Linux kernel (at
1373 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1374 the children get a chance to be reaped, it will remain a zombie
1375 forever. */
1376
1377 if (lwpid_of (thread) == pid)
1378 {
1379 if (debug_threads)
1380 debug_printf ("lkop: is last of process %s\n",
1381 target_pid_to_str (thread->id));
1382 return;
1383 }
1384
1385 kill_wait_lwp (lwp);
1386 }
1387
1388 static int
1389 linux_kill (process_info *process)
1390 {
1391 int pid = process->pid;
1392
1393 /* If we're killing a running inferior, make sure it is stopped
1394 first, as PTRACE_KILL will not work otherwise. */
1395 stop_all_lwps (0, NULL);
1396
1397 for_each_thread (pid, [&] (thread_info *thread)
1398 {
1399 kill_one_lwp_callback (thread, pid);
1400 });
1401
1402 /* See the comment in linux_kill_one_lwp. We did not kill the first
1403 thread in the list, so do so now. */
1404 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1405
1406 if (lwp == NULL)
1407 {
1408 if (debug_threads)
1409 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1410 pid);
1411 }
1412 else
1413 kill_wait_lwp (lwp);
1414
1415 the_target->mourn (process);
1416
1417 /* Since we presently can only stop all lwps of all processes, we
1418 need to unstop lwps of other processes. */
1419 unstop_all_lwps (0, NULL);
1420 return 0;
1421 }
1422
1423 /* Get pending signal of THREAD, for detaching purposes. This is the
1424 signal the thread last stopped for, which we need to deliver to the
1425 thread when detaching, otherwise, it'd be suppressed/lost. */
1426
1427 static int
1428 get_detach_signal (struct thread_info *thread)
1429 {
1430 client_state &cs = get_client_state ();
1431 enum gdb_signal signo = GDB_SIGNAL_0;
1432 int status;
1433 struct lwp_info *lp = get_thread_lwp (thread);
1434
1435 if (lp->status_pending_p)
1436 status = lp->status_pending;
1437 else
1438 {
1439 /* If the thread had been suspended by gdbserver, and it stopped
1440 cleanly, then it'll have stopped with SIGSTOP. But we don't
1441 want to deliver that SIGSTOP. */
1442 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1443 || thread->last_status.value.sig == GDB_SIGNAL_0)
1444 return 0;
1445
1446 /* Otherwise, we may need to deliver the signal we
1447 intercepted. */
1448 status = lp->last_status;
1449 }
1450
1451 if (!WIFSTOPPED (status))
1452 {
1453 if (debug_threads)
1454 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1455 target_pid_to_str (ptid_of (thread)));
1456 return 0;
1457 }
1458
1459 /* Extended wait statuses aren't real SIGTRAPs. */
1460 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1461 {
1462 if (debug_threads)
1463 debug_printf ("GPS: lwp %s had stopped with extended "
1464 "status: no pending signal\n",
1465 target_pid_to_str (ptid_of (thread)));
1466 return 0;
1467 }
1468
1469 signo = gdb_signal_from_host (WSTOPSIG (status));
1470
1471 if (cs.program_signals_p && !cs.program_signals[signo])
1472 {
1473 if (debug_threads)
1474 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1475 target_pid_to_str (ptid_of (thread)),
1476 gdb_signal_to_string (signo));
1477 return 0;
1478 }
1479 else if (!cs.program_signals_p
1480 /* If we have no way to know which signals GDB does not
1481 want to have passed to the program, assume
1482 SIGTRAP/SIGINT, which is GDB's default. */
1483 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1484 {
1485 if (debug_threads)
1486 debug_printf ("GPS: lwp %s had signal %s, "
1487 "but we don't know if we should pass it. "
1488 "Default to not.\n",
1489 target_pid_to_str (ptid_of (thread)),
1490 gdb_signal_to_string (signo));
1491 return 0;
1492 }
1493 else
1494 {
1495 if (debug_threads)
1496 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1497 target_pid_to_str (ptid_of (thread)),
1498 gdb_signal_to_string (signo));
1499
1500 return WSTOPSIG (status);
1501 }
1502 }
1503
1504 /* Detach from LWP. */
1505
1506 static void
1507 linux_detach_one_lwp (struct lwp_info *lwp)
1508 {
1509 struct thread_info *thread = get_lwp_thread (lwp);
1510 int sig;
1511 int lwpid;
1512
1513 /* If there is a pending SIGSTOP, get rid of it. */
1514 if (lwp->stop_expected)
1515 {
1516 if (debug_threads)
1517 debug_printf ("Sending SIGCONT to %s\n",
1518 target_pid_to_str (ptid_of (thread)));
1519
1520 kill_lwp (lwpid_of (thread), SIGCONT);
1521 lwp->stop_expected = 0;
1522 }
1523
1524 /* Pass on any pending signal for this thread. */
1525 sig = get_detach_signal (thread);
1526
1527 /* Preparing to resume may try to write registers, and fail if the
1528 lwp is zombie. If that happens, ignore the error. We'll handle
1529 it below, when detach fails with ESRCH. */
1530 TRY
1531 {
1532 /* Flush any pending changes to the process's registers. */
1533 regcache_invalidate_thread (thread);
1534
1535 /* Finally, let it resume. */
1536 if (the_low_target.prepare_to_resume != NULL)
1537 the_low_target.prepare_to_resume (lwp);
1538 }
1539 CATCH (ex, RETURN_MASK_ERROR)
1540 {
1541 if (!check_ptrace_stopped_lwp_gone (lwp))
1542 throw_exception (ex);
1543 }
1544 END_CATCH
1545
1546 lwpid = lwpid_of (thread);
1547 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1548 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1549 {
1550 int save_errno = errno;
1551
1552 /* We know the thread exists, so ESRCH must mean the lwp is
1553 zombie. This can happen if one of the already-detached
1554 threads exits the whole thread group. In that case we're
1555 still attached, and must reap the lwp. */
1556 if (save_errno == ESRCH)
1557 {
1558 int ret, status;
1559
1560 ret = my_waitpid (lwpid, &status, __WALL);
1561 if (ret == -1)
1562 {
1563 warning (_("Couldn't reap LWP %d while detaching: %s"),
1564 lwpid, strerror (errno));
1565 }
1566 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1567 {
1568 warning (_("Reaping LWP %d while detaching "
1569 "returned unexpected status 0x%x"),
1570 lwpid, status);
1571 }
1572 }
1573 else
1574 {
1575 error (_("Can't detach %s: %s"),
1576 target_pid_to_str (ptid_of (thread)),
1577 strerror (save_errno));
1578 }
1579 }
1580 else if (debug_threads)
1581 {
1582 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1583 target_pid_to_str (ptid_of (thread)),
1584 strsignal (sig));
1585 }
1586
1587 delete_lwp (lwp);
1588 }
1589
1590 /* Callback for for_each_thread. Detaches from non-leader threads of a
1591 given process. */
1592
1593 static void
1594 linux_detach_lwp_callback (thread_info *thread)
1595 {
1596 /* We don't actually detach from the thread group leader just yet.
1597 If the thread group exits, we must reap the zombie clone lwps
1598 before we're able to reap the leader. */
1599 if (thread->id.pid () == thread->id.lwp ())
1600 return;
1601
1602 lwp_info *lwp = get_thread_lwp (thread);
1603 linux_detach_one_lwp (lwp);
1604 }
1605
1606 static int
1607 linux_detach (process_info *process)
1608 {
1609 struct lwp_info *main_lwp;
1610
1611 /* As there's a step over already in progress, let it finish first,
1612 otherwise nesting a stabilize_threads operation on top gets real
1613 messy. */
1614 complete_ongoing_step_over ();
1615
1616 /* Stop all threads before detaching. First, ptrace requires that
1617 the thread is stopped to sucessfully detach. Second, thread_db
1618 may need to uninstall thread event breakpoints from memory, which
1619 only works with a stopped process anyway. */
1620 stop_all_lwps (0, NULL);
1621
1622 #ifdef USE_THREAD_DB
1623 thread_db_detach (process);
1624 #endif
1625
1626 /* Stabilize threads (move out of jump pads). */
1627 stabilize_threads ();
1628
1629 /* Detach from the clone lwps first. If the thread group exits just
1630 while we're detaching, we must reap the clone lwps before we're
1631 able to reap the leader. */
1632 for_each_thread (process->pid, linux_detach_lwp_callback);
1633
1634 main_lwp = find_lwp_pid (ptid_t (process->pid));
1635 linux_detach_one_lwp (main_lwp);
1636
1637 the_target->mourn (process);
1638
1639 /* Since we presently can only stop all lwps of all processes, we
1640 need to unstop lwps of other processes. */
1641 unstop_all_lwps (0, NULL);
1642 return 0;
1643 }
1644
1645 /* Remove all LWPs that belong to process PROC from the lwp list. */
1646
1647 static void
1648 linux_mourn (struct process_info *process)
1649 {
1650 struct process_info_private *priv;
1651
1652 #ifdef USE_THREAD_DB
1653 thread_db_mourn (process);
1654 #endif
1655
1656 for_each_thread (process->pid, [] (thread_info *thread)
1657 {
1658 delete_lwp (get_thread_lwp (thread));
1659 });
1660
1661 /* Freeing all private data. */
1662 priv = process->priv;
1663 if (the_low_target.delete_process != NULL)
1664 the_low_target.delete_process (priv->arch_private);
1665 else
1666 gdb_assert (priv->arch_private == NULL);
1667 free (priv);
1668 process->priv = NULL;
1669
1670 remove_process (process);
1671 }
1672
1673 static void
1674 linux_join (int pid)
1675 {
1676 int status, ret;
1677
1678 do {
1679 ret = my_waitpid (pid, &status, 0);
1680 if (WIFEXITED (status) || WIFSIGNALED (status))
1681 break;
1682 } while (ret != -1 || errno != ECHILD);
1683 }
1684
1685 /* Return nonzero if the given thread is still alive. */
1686 static int
1687 linux_thread_alive (ptid_t ptid)
1688 {
1689 struct lwp_info *lwp = find_lwp_pid (ptid);
1690
1691 /* We assume we always know if a thread exits. If a whole process
1692 exited but we still haven't been able to report it to GDB, we'll
1693 hold on to the last lwp of the dead process. */
1694 if (lwp != NULL)
1695 return !lwp_is_marked_dead (lwp);
1696 else
1697 return 0;
1698 }
1699
1700 /* Return 1 if this lwp still has an interesting status pending. If
1701 not (e.g., it had stopped for a breakpoint that is gone), return
1702 false. */
1703
1704 static int
1705 thread_still_has_status_pending_p (struct thread_info *thread)
1706 {
1707 struct lwp_info *lp = get_thread_lwp (thread);
1708
1709 if (!lp->status_pending_p)
1710 return 0;
1711
1712 if (thread->last_resume_kind != resume_stop
1713 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1714 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1715 {
1716 struct thread_info *saved_thread;
1717 CORE_ADDR pc;
1718 int discard = 0;
1719
1720 gdb_assert (lp->last_status != 0);
1721
1722 pc = get_pc (lp);
1723
1724 saved_thread = current_thread;
1725 current_thread = thread;
1726
1727 if (pc != lp->stop_pc)
1728 {
1729 if (debug_threads)
1730 debug_printf ("PC of %ld changed\n",
1731 lwpid_of (thread));
1732 discard = 1;
1733 }
1734
1735 #if !USE_SIGTRAP_SIGINFO
1736 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1737 && !(*the_low_target.breakpoint_at) (pc))
1738 {
1739 if (debug_threads)
1740 debug_printf ("previous SW breakpoint of %ld gone\n",
1741 lwpid_of (thread));
1742 discard = 1;
1743 }
1744 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1745 && !hardware_breakpoint_inserted_here (pc))
1746 {
1747 if (debug_threads)
1748 debug_printf ("previous HW breakpoint of %ld gone\n",
1749 lwpid_of (thread));
1750 discard = 1;
1751 }
1752 #endif
1753
1754 current_thread = saved_thread;
1755
1756 if (discard)
1757 {
1758 if (debug_threads)
1759 debug_printf ("discarding pending breakpoint status\n");
1760 lp->status_pending_p = 0;
1761 return 0;
1762 }
1763 }
1764
1765 return 1;
1766 }
1767
1768 /* Returns true if LWP is resumed from the client's perspective. */
1769
1770 static int
1771 lwp_resumed (struct lwp_info *lwp)
1772 {
1773 struct thread_info *thread = get_lwp_thread (lwp);
1774
1775 if (thread->last_resume_kind != resume_stop)
1776 return 1;
1777
1778 /* Did gdb send us a `vCont;t', but we haven't reported the
1779 corresponding stop to gdb yet? If so, the thread is still
1780 resumed/running from gdb's perspective. */
1781 if (thread->last_resume_kind == resume_stop
1782 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1783 return 1;
1784
1785 return 0;
1786 }
1787
1788 /* Return true if this lwp has an interesting status pending. */
1789 static bool
1790 status_pending_p_callback (thread_info *thread, ptid_t ptid)
1791 {
1792 struct lwp_info *lp = get_thread_lwp (thread);
1793
1794 /* Check if we're only interested in events from a specific process
1795 or a specific LWP. */
1796 if (!thread->id.matches (ptid))
1797 return 0;
1798
1799 if (!lwp_resumed (lp))
1800 return 0;
1801
1802 if (lp->status_pending_p
1803 && !thread_still_has_status_pending_p (thread))
1804 {
1805 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1806 return 0;
1807 }
1808
1809 return lp->status_pending_p;
1810 }
1811
1812 struct lwp_info *
1813 find_lwp_pid (ptid_t ptid)
1814 {
1815 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1816 {
1817 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1818 return thr_arg->id.lwp () == lwp;
1819 });
1820
1821 if (thread == NULL)
1822 return NULL;
1823
1824 return get_thread_lwp (thread);
1825 }
1826
1827 /* Return the number of known LWPs in the tgid given by PID. */
1828
1829 static int
1830 num_lwps (int pid)
1831 {
1832 int count = 0;
1833
1834 for_each_thread (pid, [&] (thread_info *thread)
1835 {
1836 count++;
1837 });
1838
1839 return count;
1840 }
1841
1842 /* See nat/linux-nat.h. */
1843
1844 struct lwp_info *
1845 iterate_over_lwps (ptid_t filter,
1846 gdb::function_view<iterate_over_lwps_ftype> callback)
1847 {
1848 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1849 {
1850 lwp_info *lwp = get_thread_lwp (thr_arg);
1851
1852 return callback (lwp);
1853 });
1854
1855 if (thread == NULL)
1856 return NULL;
1857
1858 return get_thread_lwp (thread);
1859 }
1860
1861 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1862 their exits until all other threads in the group have exited. */
1863
1864 static void
1865 check_zombie_leaders (void)
1866 {
1867 for_each_process ([] (process_info *proc) {
1868 pid_t leader_pid = pid_of (proc);
1869 struct lwp_info *leader_lp;
1870
1871 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1872
1873 if (debug_threads)
1874 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1875 "num_lwps=%d, zombie=%d\n",
1876 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1877 linux_proc_pid_is_zombie (leader_pid));
1878
1879 if (leader_lp != NULL && !leader_lp->stopped
1880 /* Check if there are other threads in the group, as we may
1881 have raced with the inferior simply exiting. */
1882 && !last_thread_of_process_p (leader_pid)
1883 && linux_proc_pid_is_zombie (leader_pid))
1884 {
1885 /* A leader zombie can mean one of two things:
1886
1887 - It exited, and there's an exit status pending
1888 available, or only the leader exited (not the whole
1889 program). In the latter case, we can't waitpid the
1890 leader's exit status until all other threads are gone.
1891
1892 - There are 3 or more threads in the group, and a thread
1893 other than the leader exec'd. On an exec, the Linux
1894 kernel destroys all other threads (except the execing
1895 one) in the thread group, and resets the execing thread's
1896 tid to the tgid. No exit notification is sent for the
1897 execing thread -- from the ptracer's perspective, it
1898 appears as though the execing thread just vanishes.
1899 Until we reap all other threads except the leader and the
1900 execing thread, the leader will be zombie, and the
1901 execing thread will be in `D (disc sleep)'. As soon as
1902 all other threads are reaped, the execing thread changes
1903 it's tid to the tgid, and the previous (zombie) leader
1904 vanishes, giving place to the "new" leader. We could try
1905 distinguishing the exit and exec cases, by waiting once
1906 more, and seeing if something comes out, but it doesn't
1907 sound useful. The previous leader _does_ go away, and
1908 we'll re-add the new one once we see the exec event
1909 (which is just the same as what would happen if the
1910 previous leader did exit voluntarily before some other
1911 thread execs). */
1912
1913 if (debug_threads)
1914 debug_printf ("CZL: Thread group leader %d zombie "
1915 "(it exited, or another thread execd).\n",
1916 leader_pid);
1917
1918 delete_lwp (leader_lp);
1919 }
1920 });
1921 }
1922
1923 /* Callback for `find_thread'. Returns the first LWP that is not
1924 stopped. */
1925
1926 static bool
1927 not_stopped_callback (thread_info *thread, ptid_t filter)
1928 {
1929 if (!thread->id.matches (filter))
1930 return false;
1931
1932 lwp_info *lwp = get_thread_lwp (thread);
1933
1934 return !lwp->stopped;
1935 }
1936
1937 /* Increment LWP's suspend count. */
1938
1939 static void
1940 lwp_suspended_inc (struct lwp_info *lwp)
1941 {
1942 lwp->suspended++;
1943
1944 if (debug_threads && lwp->suspended > 4)
1945 {
1946 struct thread_info *thread = get_lwp_thread (lwp);
1947
1948 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1949 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1950 }
1951 }
1952
1953 /* Decrement LWP's suspend count. */
1954
1955 static void
1956 lwp_suspended_decr (struct lwp_info *lwp)
1957 {
1958 lwp->suspended--;
1959
1960 if (lwp->suspended < 0)
1961 {
1962 struct thread_info *thread = get_lwp_thread (lwp);
1963
1964 internal_error (__FILE__, __LINE__,
1965 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1966 lwp->suspended);
1967 }
1968 }
1969
1970 /* This function should only be called if the LWP got a SIGTRAP.
1971
1972 Handle any tracepoint steps or hits. Return true if a tracepoint
1973 event was handled, 0 otherwise. */
1974
1975 static int
1976 handle_tracepoints (struct lwp_info *lwp)
1977 {
1978 struct thread_info *tinfo = get_lwp_thread (lwp);
1979 int tpoint_related_event = 0;
1980
1981 gdb_assert (lwp->suspended == 0);
1982
1983 /* If this tracepoint hit causes a tracing stop, we'll immediately
1984 uninsert tracepoints. To do this, we temporarily pause all
1985 threads, unpatch away, and then unpause threads. We need to make
1986 sure the unpausing doesn't resume LWP too. */
1987 lwp_suspended_inc (lwp);
1988
1989 /* And we need to be sure that any all-threads-stopping doesn't try
1990 to move threads out of the jump pads, as it could deadlock the
1991 inferior (LWP could be in the jump pad, maybe even holding the
1992 lock.) */
1993
1994 /* Do any necessary step collect actions. */
1995 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1996
1997 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1998
1999 /* See if we just hit a tracepoint and do its main collect
2000 actions. */
2001 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2002
2003 lwp_suspended_decr (lwp);
2004
2005 gdb_assert (lwp->suspended == 0);
2006 gdb_assert (!stabilizing_threads
2007 || (lwp->collecting_fast_tracepoint
2008 != fast_tpoint_collect_result::not_collecting));
2009
2010 if (tpoint_related_event)
2011 {
2012 if (debug_threads)
2013 debug_printf ("got a tracepoint event\n");
2014 return 1;
2015 }
2016
2017 return 0;
2018 }
2019
2020 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2021 collection status. */
2022
2023 static fast_tpoint_collect_result
2024 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2025 struct fast_tpoint_collect_status *status)
2026 {
2027 CORE_ADDR thread_area;
2028 struct thread_info *thread = get_lwp_thread (lwp);
2029
2030 if (the_low_target.get_thread_area == NULL)
2031 return fast_tpoint_collect_result::not_collecting;
2032
2033 /* Get the thread area address. This is used to recognize which
2034 thread is which when tracing with the in-process agent library.
2035 We don't read anything from the address, and treat it as opaque;
2036 it's the address itself that we assume is unique per-thread. */
2037 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2038 return fast_tpoint_collect_result::not_collecting;
2039
2040 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2041 }
2042
2043 /* The reason we resume in the caller, is because we want to be able
2044 to pass lwp->status_pending as WSTAT, and we need to clear
2045 status_pending_p before resuming, otherwise, linux_resume_one_lwp
2046 refuses to resume. */
2047
2048 static int
2049 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2050 {
2051 struct thread_info *saved_thread;
2052
2053 saved_thread = current_thread;
2054 current_thread = get_lwp_thread (lwp);
2055
2056 if ((wstat == NULL
2057 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2058 && supports_fast_tracepoints ()
2059 && agent_loaded_p ())
2060 {
2061 struct fast_tpoint_collect_status status;
2062
2063 if (debug_threads)
2064 debug_printf ("Checking whether LWP %ld needs to move out of the "
2065 "jump pad.\n",
2066 lwpid_of (current_thread));
2067
2068 fast_tpoint_collect_result r
2069 = linux_fast_tracepoint_collecting (lwp, &status);
2070
2071 if (wstat == NULL
2072 || (WSTOPSIG (*wstat) != SIGILL
2073 && WSTOPSIG (*wstat) != SIGFPE
2074 && WSTOPSIG (*wstat) != SIGSEGV
2075 && WSTOPSIG (*wstat) != SIGBUS))
2076 {
2077 lwp->collecting_fast_tracepoint = r;
2078
2079 if (r != fast_tpoint_collect_result::not_collecting)
2080 {
2081 if (r == fast_tpoint_collect_result::before_insn
2082 && lwp->exit_jump_pad_bkpt == NULL)
2083 {
2084 /* Haven't executed the original instruction yet.
2085 Set breakpoint there, and wait till it's hit,
2086 then single-step until exiting the jump pad. */
2087 lwp->exit_jump_pad_bkpt
2088 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2089 }
2090
2091 if (debug_threads)
2092 debug_printf ("Checking whether LWP %ld needs to move out of "
2093 "the jump pad...it does\n",
2094 lwpid_of (current_thread));
2095 current_thread = saved_thread;
2096
2097 return 1;
2098 }
2099 }
2100 else
2101 {
2102 /* If we get a synchronous signal while collecting, *and*
2103 while executing the (relocated) original instruction,
2104 reset the PC to point at the tpoint address, before
2105 reporting to GDB. Otherwise, it's an IPA lib bug: just
2106 report the signal to GDB, and pray for the best. */
2107
2108 lwp->collecting_fast_tracepoint
2109 = fast_tpoint_collect_result::not_collecting;
2110
2111 if (r != fast_tpoint_collect_result::not_collecting
2112 && (status.adjusted_insn_addr <= lwp->stop_pc
2113 && lwp->stop_pc < status.adjusted_insn_addr_end))
2114 {
2115 siginfo_t info;
2116 struct regcache *regcache;
2117
2118 /* The si_addr on a few signals references the address
2119 of the faulting instruction. Adjust that as
2120 well. */
2121 if ((WSTOPSIG (*wstat) == SIGILL
2122 || WSTOPSIG (*wstat) == SIGFPE
2123 || WSTOPSIG (*wstat) == SIGBUS
2124 || WSTOPSIG (*wstat) == SIGSEGV)
2125 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2126 (PTRACE_TYPE_ARG3) 0, &info) == 0
2127 /* Final check just to make sure we don't clobber
2128 the siginfo of non-kernel-sent signals. */
2129 && (uintptr_t) info.si_addr == lwp->stop_pc)
2130 {
2131 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2132 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2133 (PTRACE_TYPE_ARG3) 0, &info);
2134 }
2135
2136 regcache = get_thread_regcache (current_thread, 1);
2137 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2138 lwp->stop_pc = status.tpoint_addr;
2139
2140 /* Cancel any fast tracepoint lock this thread was
2141 holding. */
2142 force_unlock_trace_buffer ();
2143 }
2144
2145 if (lwp->exit_jump_pad_bkpt != NULL)
2146 {
2147 if (debug_threads)
2148 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2149 "stopping all threads momentarily.\n");
2150
2151 stop_all_lwps (1, lwp);
2152
2153 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2154 lwp->exit_jump_pad_bkpt = NULL;
2155
2156 unstop_all_lwps (1, lwp);
2157
2158 gdb_assert (lwp->suspended >= 0);
2159 }
2160 }
2161 }
2162
2163 if (debug_threads)
2164 debug_printf ("Checking whether LWP %ld needs to move out of the "
2165 "jump pad...no\n",
2166 lwpid_of (current_thread));
2167
2168 current_thread = saved_thread;
2169 return 0;
2170 }
2171
2172 /* Enqueue one signal in the "signals to report later when out of the
2173 jump pad" list. */
2174
2175 static void
2176 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2177 {
2178 struct pending_signals *p_sig;
2179 struct thread_info *thread = get_lwp_thread (lwp);
2180
2181 if (debug_threads)
2182 debug_printf ("Deferring signal %d for LWP %ld.\n",
2183 WSTOPSIG (*wstat), lwpid_of (thread));
2184
2185 if (debug_threads)
2186 {
2187 struct pending_signals *sig;
2188
2189 for (sig = lwp->pending_signals_to_report;
2190 sig != NULL;
2191 sig = sig->prev)
2192 debug_printf (" Already queued %d\n",
2193 sig->signal);
2194
2195 debug_printf (" (no more currently queued signals)\n");
2196 }
2197
2198 /* Don't enqueue non-RT signals if they are already in the deferred
2199 queue. (SIGSTOP being the easiest signal to see ending up here
2200 twice) */
2201 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2202 {
2203 struct pending_signals *sig;
2204
2205 for (sig = lwp->pending_signals_to_report;
2206 sig != NULL;
2207 sig = sig->prev)
2208 {
2209 if (sig->signal == WSTOPSIG (*wstat))
2210 {
2211 if (debug_threads)
2212 debug_printf ("Not requeuing already queued non-RT signal %d"
2213 " for LWP %ld\n",
2214 sig->signal,
2215 lwpid_of (thread));
2216 return;
2217 }
2218 }
2219 }
2220
2221 p_sig = XCNEW (struct pending_signals);
2222 p_sig->prev = lwp->pending_signals_to_report;
2223 p_sig->signal = WSTOPSIG (*wstat);
2224
2225 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2226 &p_sig->info);
2227
2228 lwp->pending_signals_to_report = p_sig;
2229 }
2230
2231 /* Dequeue one signal from the "signals to report later when out of
2232 the jump pad" list. */
2233
2234 static int
2235 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2236 {
2237 struct thread_info *thread = get_lwp_thread (lwp);
2238
2239 if (lwp->pending_signals_to_report != NULL)
2240 {
2241 struct pending_signals **p_sig;
2242
2243 p_sig = &lwp->pending_signals_to_report;
2244 while ((*p_sig)->prev != NULL)
2245 p_sig = &(*p_sig)->prev;
2246
2247 *wstat = W_STOPCODE ((*p_sig)->signal);
2248 if ((*p_sig)->info.si_signo != 0)
2249 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2250 &(*p_sig)->info);
2251 free (*p_sig);
2252 *p_sig = NULL;
2253
2254 if (debug_threads)
2255 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2256 WSTOPSIG (*wstat), lwpid_of (thread));
2257
2258 if (debug_threads)
2259 {
2260 struct pending_signals *sig;
2261
2262 for (sig = lwp->pending_signals_to_report;
2263 sig != NULL;
2264 sig = sig->prev)
2265 debug_printf (" Still queued %d\n",
2266 sig->signal);
2267
2268 debug_printf (" (no more queued signals)\n");
2269 }
2270
2271 return 1;
2272 }
2273
2274 return 0;
2275 }
2276
2277 /* Fetch the possibly triggered data watchpoint info and store it in
2278 CHILD.
2279
2280 On some archs, like x86, that use debug registers to set
2281 watchpoints, it's possible that the way to know which watched
2282 address trapped, is to check the register that is used to select
2283 which address to watch. Problem is, between setting the watchpoint
2284 and reading back which data address trapped, the user may change
2285 the set of watchpoints, and, as a consequence, GDB changes the
2286 debug registers in the inferior. To avoid reading back a stale
2287 stopped-data-address when that happens, we cache in LP the fact
2288 that a watchpoint trapped, and the corresponding data address, as
2289 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2290 registers meanwhile, we have the cached data we can rely on. */
2291
2292 static int
2293 check_stopped_by_watchpoint (struct lwp_info *child)
2294 {
2295 if (the_low_target.stopped_by_watchpoint != NULL)
2296 {
2297 struct thread_info *saved_thread;
2298
2299 saved_thread = current_thread;
2300 current_thread = get_lwp_thread (child);
2301
2302 if (the_low_target.stopped_by_watchpoint ())
2303 {
2304 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2305
2306 if (the_low_target.stopped_data_address != NULL)
2307 child->stopped_data_address
2308 = the_low_target.stopped_data_address ();
2309 else
2310 child->stopped_data_address = 0;
2311 }
2312
2313 current_thread = saved_thread;
2314 }
2315
2316 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2317 }
2318
2319 /* Return the ptrace options that we want to try to enable. */
2320
2321 static int
2322 linux_low_ptrace_options (int attached)
2323 {
2324 client_state &cs = get_client_state ();
2325 int options = 0;
2326
2327 if (!attached)
2328 options |= PTRACE_O_EXITKILL;
2329
2330 if (cs.report_fork_events)
2331 options |= PTRACE_O_TRACEFORK;
2332
2333 if (cs.report_vfork_events)
2334 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2335
2336 if (cs.report_exec_events)
2337 options |= PTRACE_O_TRACEEXEC;
2338
2339 options |= PTRACE_O_TRACESYSGOOD;
2340
2341 return options;
2342 }
2343
2344 /* Do low-level handling of the event, and check if we should go on
2345 and pass it to caller code. Return the affected lwp if we are, or
2346 NULL otherwise. */
2347
2348 static struct lwp_info *
2349 linux_low_filter_event (int lwpid, int wstat)
2350 {
2351 client_state &cs = get_client_state ();
2352 struct lwp_info *child;
2353 struct thread_info *thread;
2354 int have_stop_pc = 0;
2355
2356 child = find_lwp_pid (ptid_t (lwpid));
2357
2358 /* Check for stop events reported by a process we didn't already
2359 know about - anything not already in our LWP list.
2360
2361 If we're expecting to receive stopped processes after
2362 fork, vfork, and clone events, then we'll just add the
2363 new one to our list and go back to waiting for the event
2364 to be reported - the stopped process might be returned
2365 from waitpid before or after the event is.
2366
2367 But note the case of a non-leader thread exec'ing after the
2368 leader having exited, and gone from our lists (because
2369 check_zombie_leaders deleted it). The non-leader thread
2370 changes its tid to the tgid. */
2371
2372 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2373 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2374 {
2375 ptid_t child_ptid;
2376
2377 /* A multi-thread exec after we had seen the leader exiting. */
2378 if (debug_threads)
2379 {
2380 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2381 "after exec.\n", lwpid);
2382 }
2383
2384 child_ptid = ptid_t (lwpid, lwpid, 0);
2385 child = add_lwp (child_ptid);
2386 child->stopped = 1;
2387 current_thread = child->thread;
2388 }
2389
2390 /* If we didn't find a process, one of two things presumably happened:
2391 - A process we started and then detached from has exited. Ignore it.
2392 - A process we are controlling has forked and the new child's stop
2393 was reported to us by the kernel. Save its PID. */
2394 if (child == NULL && WIFSTOPPED (wstat))
2395 {
2396 add_to_pid_list (&stopped_pids, lwpid, wstat);
2397 return NULL;
2398 }
2399 else if (child == NULL)
2400 return NULL;
2401
2402 thread = get_lwp_thread (child);
2403
2404 child->stopped = 1;
2405
2406 child->last_status = wstat;
2407
2408 /* Check if the thread has exited. */
2409 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2410 {
2411 if (debug_threads)
2412 debug_printf ("LLFE: %d exited.\n", lwpid);
2413
2414 if (finish_step_over (child))
2415 {
2416 /* Unsuspend all other LWPs, and set them back running again. */
2417 unsuspend_all_lwps (child);
2418 }
2419
2420 /* If there is at least one more LWP, then the exit signal was
2421 not the end of the debugged application and should be
2422 ignored, unless GDB wants to hear about thread exits. */
2423 if (cs.report_thread_events
2424 || last_thread_of_process_p (pid_of (thread)))
2425 {
2426 /* Since events are serialized to GDB core, and we can't
2427 report this one right now. Leave the status pending for
2428 the next time we're able to report it. */
2429 mark_lwp_dead (child, wstat);
2430 return child;
2431 }
2432 else
2433 {
2434 delete_lwp (child);
2435 return NULL;
2436 }
2437 }
2438
2439 gdb_assert (WIFSTOPPED (wstat));
2440
2441 if (WIFSTOPPED (wstat))
2442 {
2443 struct process_info *proc;
2444
2445 /* Architecture-specific setup after inferior is running. */
2446 proc = find_process_pid (pid_of (thread));
2447 if (proc->tdesc == NULL)
2448 {
2449 if (proc->attached)
2450 {
2451 /* This needs to happen after we have attached to the
2452 inferior and it is stopped for the first time, but
2453 before we access any inferior registers. */
2454 linux_arch_setup_thread (thread);
2455 }
2456 else
2457 {
2458 /* The process is started, but GDBserver will do
2459 architecture-specific setup after the program stops at
2460 the first instruction. */
2461 child->status_pending_p = 1;
2462 child->status_pending = wstat;
2463 return child;
2464 }
2465 }
2466 }
2467
2468 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2469 {
2470 struct process_info *proc = find_process_pid (pid_of (thread));
2471 int options = linux_low_ptrace_options (proc->attached);
2472
2473 linux_enable_event_reporting (lwpid, options);
2474 child->must_set_ptrace_flags = 0;
2475 }
2476
2477 /* Always update syscall_state, even if it will be filtered later. */
2478 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2479 {
2480 child->syscall_state
2481 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2482 ? TARGET_WAITKIND_SYSCALL_RETURN
2483 : TARGET_WAITKIND_SYSCALL_ENTRY);
2484 }
2485 else
2486 {
2487 /* Almost all other ptrace-stops are known to be outside of system
2488 calls, with further exceptions in handle_extended_wait. */
2489 child->syscall_state = TARGET_WAITKIND_IGNORE;
2490 }
2491
2492 /* Be careful to not overwrite stop_pc until save_stop_reason is
2493 called. */
2494 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2495 && linux_is_extended_waitstatus (wstat))
2496 {
2497 child->stop_pc = get_pc (child);
2498 if (handle_extended_wait (&child, wstat))
2499 {
2500 /* The event has been handled, so just return without
2501 reporting it. */
2502 return NULL;
2503 }
2504 }
2505
2506 if (linux_wstatus_maybe_breakpoint (wstat))
2507 {
2508 if (save_stop_reason (child))
2509 have_stop_pc = 1;
2510 }
2511
2512 if (!have_stop_pc)
2513 child->stop_pc = get_pc (child);
2514
2515 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2516 && child->stop_expected)
2517 {
2518 if (debug_threads)
2519 debug_printf ("Expected stop.\n");
2520 child->stop_expected = 0;
2521
2522 if (thread->last_resume_kind == resume_stop)
2523 {
2524 /* We want to report the stop to the core. Treat the
2525 SIGSTOP as a normal event. */
2526 if (debug_threads)
2527 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2528 target_pid_to_str (ptid_of (thread)));
2529 }
2530 else if (stopping_threads != NOT_STOPPING_THREADS)
2531 {
2532 /* Stopping threads. We don't want this SIGSTOP to end up
2533 pending. */
2534 if (debug_threads)
2535 debug_printf ("LLW: SIGSTOP caught for %s "
2536 "while stopping threads.\n",
2537 target_pid_to_str (ptid_of (thread)));
2538 return NULL;
2539 }
2540 else
2541 {
2542 /* This is a delayed SIGSTOP. Filter out the event. */
2543 if (debug_threads)
2544 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2545 child->stepping ? "step" : "continue",
2546 target_pid_to_str (ptid_of (thread)));
2547
2548 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2549 return NULL;
2550 }
2551 }
2552
2553 child->status_pending_p = 1;
2554 child->status_pending = wstat;
2555 return child;
2556 }
2557
2558 /* Return true if THREAD is doing hardware single step. */
2559
2560 static int
2561 maybe_hw_step (struct thread_info *thread)
2562 {
2563 if (can_hardware_single_step ())
2564 return 1;
2565 else
2566 {
2567 /* GDBserver must insert single-step breakpoint for software
2568 single step. */
2569 gdb_assert (has_single_step_breakpoints (thread));
2570 return 0;
2571 }
2572 }
2573
2574 /* Resume LWPs that are currently stopped without any pending status
2575 to report, but are resumed from the core's perspective. */
2576
2577 static void
2578 resume_stopped_resumed_lwps (thread_info *thread)
2579 {
2580 struct lwp_info *lp = get_thread_lwp (thread);
2581
2582 if (lp->stopped
2583 && !lp->suspended
2584 && !lp->status_pending_p
2585 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2586 {
2587 int step = 0;
2588
2589 if (thread->last_resume_kind == resume_step)
2590 step = maybe_hw_step (thread);
2591
2592 if (debug_threads)
2593 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2594 target_pid_to_str (ptid_of (thread)),
2595 paddress (lp->stop_pc),
2596 step);
2597
2598 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2599 }
2600 }
2601
2602 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2603 match FILTER_PTID (leaving others pending). The PTIDs can be:
2604 minus_one_ptid, to specify any child; a pid PTID, specifying all
2605 lwps of a thread group; or a PTID representing a single lwp. Store
2606 the stop status through the status pointer WSTAT. OPTIONS is
2607 passed to the waitpid call. Return 0 if no event was found and
2608 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2609 was found. Return the PID of the stopped child otherwise. */
2610
2611 static int
2612 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2613 int *wstatp, int options)
2614 {
2615 struct thread_info *event_thread;
2616 struct lwp_info *event_child, *requested_child;
2617 sigset_t block_mask, prev_mask;
2618
2619 retry:
2620 /* N.B. event_thread points to the thread_info struct that contains
2621 event_child. Keep them in sync. */
2622 event_thread = NULL;
2623 event_child = NULL;
2624 requested_child = NULL;
2625
2626 /* Check for a lwp with a pending status. */
2627
2628 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2629 {
2630 event_thread = find_thread_in_random ([&] (thread_info *thread)
2631 {
2632 return status_pending_p_callback (thread, filter_ptid);
2633 });
2634
2635 if (event_thread != NULL)
2636 event_child = get_thread_lwp (event_thread);
2637 if (debug_threads && event_thread)
2638 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2639 }
2640 else if (filter_ptid != null_ptid)
2641 {
2642 requested_child = find_lwp_pid (filter_ptid);
2643
2644 if (stopping_threads == NOT_STOPPING_THREADS
2645 && requested_child->status_pending_p
2646 && (requested_child->collecting_fast_tracepoint
2647 != fast_tpoint_collect_result::not_collecting))
2648 {
2649 enqueue_one_deferred_signal (requested_child,
2650 &requested_child->status_pending);
2651 requested_child->status_pending_p = 0;
2652 requested_child->status_pending = 0;
2653 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2654 }
2655
2656 if (requested_child->suspended
2657 && requested_child->status_pending_p)
2658 {
2659 internal_error (__FILE__, __LINE__,
2660 "requesting an event out of a"
2661 " suspended child?");
2662 }
2663
2664 if (requested_child->status_pending_p)
2665 {
2666 event_child = requested_child;
2667 event_thread = get_lwp_thread (event_child);
2668 }
2669 }
2670
2671 if (event_child != NULL)
2672 {
2673 if (debug_threads)
2674 debug_printf ("Got an event from pending child %ld (%04x)\n",
2675 lwpid_of (event_thread), event_child->status_pending);
2676 *wstatp = event_child->status_pending;
2677 event_child->status_pending_p = 0;
2678 event_child->status_pending = 0;
2679 current_thread = event_thread;
2680 return lwpid_of (event_thread);
2681 }
2682
2683 /* But if we don't find a pending event, we'll have to wait.
2684
2685 We only enter this loop if no process has a pending wait status.
2686 Thus any action taken in response to a wait status inside this
2687 loop is responding as soon as we detect the status, not after any
2688 pending events. */
2689
2690 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2691 all signals while here. */
2692 sigfillset (&block_mask);
2693 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2694
2695 /* Always pull all events out of the kernel. We'll randomly select
2696 an event LWP out of all that have events, to prevent
2697 starvation. */
2698 while (event_child == NULL)
2699 {
2700 pid_t ret = 0;
2701
2702 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2703 quirks:
2704
2705 - If the thread group leader exits while other threads in the
2706 thread group still exist, waitpid(TGID, ...) hangs. That
2707 waitpid won't return an exit status until the other threads
2708 in the group are reaped.
2709
2710 - When a non-leader thread execs, that thread just vanishes
2711 without reporting an exit (so we'd hang if we waited for it
2712 explicitly in that case). The exec event is reported to
2713 the TGID pid. */
2714 errno = 0;
2715 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2716
2717 if (debug_threads)
2718 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2719 ret, errno ? strerror (errno) : "ERRNO-OK");
2720
2721 if (ret > 0)
2722 {
2723 if (debug_threads)
2724 {
2725 debug_printf ("LLW: waitpid %ld received %s\n",
2726 (long) ret, status_to_str (*wstatp));
2727 }
2728
2729 /* Filter all events. IOW, leave all events pending. We'll
2730 randomly select an event LWP out of all that have events
2731 below. */
2732 linux_low_filter_event (ret, *wstatp);
2733 /* Retry until nothing comes out of waitpid. A single
2734 SIGCHLD can indicate more than one child stopped. */
2735 continue;
2736 }
2737
2738 /* Now that we've pulled all events out of the kernel, resume
2739 LWPs that don't have an interesting event to report. */
2740 if (stopping_threads == NOT_STOPPING_THREADS)
2741 for_each_thread (resume_stopped_resumed_lwps);
2742
2743 /* ... and find an LWP with a status to report to the core, if
2744 any. */
2745 event_thread = find_thread_in_random ([&] (thread_info *thread)
2746 {
2747 return status_pending_p_callback (thread, filter_ptid);
2748 });
2749
2750 if (event_thread != NULL)
2751 {
2752 event_child = get_thread_lwp (event_thread);
2753 *wstatp = event_child->status_pending;
2754 event_child->status_pending_p = 0;
2755 event_child->status_pending = 0;
2756 break;
2757 }
2758
2759 /* Check for zombie thread group leaders. Those can't be reaped
2760 until all other threads in the thread group are. */
2761 check_zombie_leaders ();
2762
2763 auto not_stopped = [&] (thread_info *thread)
2764 {
2765 return not_stopped_callback (thread, wait_ptid);
2766 };
2767
2768 /* If there are no resumed children left in the set of LWPs we
2769 want to wait for, bail. We can't just block in
2770 waitpid/sigsuspend, because lwps might have been left stopped
2771 in trace-stop state, and we'd be stuck forever waiting for
2772 their status to change (which would only happen if we resumed
2773 them). Even if WNOHANG is set, this return code is preferred
2774 over 0 (below), as it is more detailed. */
2775 if (find_thread (not_stopped) == NULL)
2776 {
2777 if (debug_threads)
2778 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2779 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2780 return -1;
2781 }
2782
2783 /* No interesting event to report to the caller. */
2784 if ((options & WNOHANG))
2785 {
2786 if (debug_threads)
2787 debug_printf ("WNOHANG set, no event found\n");
2788
2789 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2790 return 0;
2791 }
2792
2793 /* Block until we get an event reported with SIGCHLD. */
2794 if (debug_threads)
2795 debug_printf ("sigsuspend'ing\n");
2796
2797 sigsuspend (&prev_mask);
2798 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2799 goto retry;
2800 }
2801
2802 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2803
2804 current_thread = event_thread;
2805
2806 return lwpid_of (event_thread);
2807 }
2808
2809 /* Wait for an event from child(ren) PTID. PTIDs can be:
2810 minus_one_ptid, to specify any child; a pid PTID, specifying all
2811 lwps of a thread group; or a PTID representing a single lwp. Store
2812 the stop status through the status pointer WSTAT. OPTIONS is
2813 passed to the waitpid call. Return 0 if no event was found and
2814 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2815 was found. Return the PID of the stopped child otherwise. */
2816
2817 static int
2818 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2819 {
2820 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2821 }
2822
2823 /* Select one LWP out of those that have events pending. */
2824
2825 static void
2826 select_event_lwp (struct lwp_info **orig_lp)
2827 {
2828 int random_selector;
2829 struct thread_info *event_thread = NULL;
2830
2831 /* In all-stop, give preference to the LWP that is being
2832 single-stepped. There will be at most one, and it's the LWP that
2833 the core is most interested in. If we didn't do this, then we'd
2834 have to handle pending step SIGTRAPs somehow in case the core
2835 later continues the previously-stepped thread, otherwise we'd
2836 report the pending SIGTRAP, and the core, not having stepped the
2837 thread, wouldn't understand what the trap was for, and therefore
2838 would report it to the user as a random signal. */
2839 if (!non_stop)
2840 {
2841 event_thread = find_thread ([] (thread_info *thread)
2842 {
2843 lwp_info *lp = get_thread_lwp (thread);
2844
2845 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2846 && thread->last_resume_kind == resume_step
2847 && lp->status_pending_p);
2848 });
2849
2850 if (event_thread != NULL)
2851 {
2852 if (debug_threads)
2853 debug_printf ("SEL: Select single-step %s\n",
2854 target_pid_to_str (ptid_of (event_thread)));
2855 }
2856 }
2857 if (event_thread == NULL)
2858 {
2859 /* No single-stepping LWP. Select one at random, out of those
2860 which have had events. */
2861
2862 /* First see how many events we have. */
2863 int num_events = 0;
2864 for_each_thread ([&] (thread_info *thread)
2865 {
2866 lwp_info *lp = get_thread_lwp (thread);
2867
2868 /* Count only resumed LWPs that have an event pending. */
2869 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2870 && lp->status_pending_p)
2871 num_events++;
2872 });
2873 gdb_assert (num_events > 0);
2874
2875 /* Now randomly pick a LWP out of those that have had
2876 events. */
2877 random_selector = (int)
2878 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2879
2880 if (debug_threads && num_events > 1)
2881 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2882 num_events, random_selector);
2883
2884 event_thread = find_thread ([&] (thread_info *thread)
2885 {
2886 lwp_info *lp = get_thread_lwp (thread);
2887
2888 /* Select only resumed LWPs that have an event pending. */
2889 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2890 && lp->status_pending_p)
2891 if (random_selector-- == 0)
2892 return true;
2893
2894 return false;
2895 });
2896 }
2897
2898 if (event_thread != NULL)
2899 {
2900 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2901
2902 /* Switch the event LWP. */
2903 *orig_lp = event_lp;
2904 }
2905 }
2906
2907 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2908 NULL. */
2909
2910 static void
2911 unsuspend_all_lwps (struct lwp_info *except)
2912 {
2913 for_each_thread ([&] (thread_info *thread)
2914 {
2915 lwp_info *lwp = get_thread_lwp (thread);
2916
2917 if (lwp != except)
2918 lwp_suspended_decr (lwp);
2919 });
2920 }
2921
2922 static void move_out_of_jump_pad_callback (thread_info *thread);
2923 static bool stuck_in_jump_pad_callback (thread_info *thread);
2924 static bool lwp_running (thread_info *thread);
2925 static ptid_t linux_wait_1 (ptid_t ptid,
2926 struct target_waitstatus *ourstatus,
2927 int target_options);
2928
2929 /* Stabilize threads (move out of jump pads).
2930
2931 If a thread is midway collecting a fast tracepoint, we need to
2932 finish the collection and move it out of the jump pad before
2933 reporting the signal.
2934
2935 This avoids recursion while collecting (when a signal arrives
2936 midway, and the signal handler itself collects), which would trash
2937 the trace buffer. In case the user set a breakpoint in a signal
2938 handler, this avoids the backtrace showing the jump pad, etc..
2939 Most importantly, there are certain things we can't do safely if
2940 threads are stopped in a jump pad (or in its callee's). For
2941 example:
2942
2943 - starting a new trace run. A thread still collecting the
2944 previous run, could trash the trace buffer when resumed. The trace
2945 buffer control structures would have been reset but the thread had
2946 no way to tell. The thread could even midway memcpy'ing to the
2947 buffer, which would mean that when resumed, it would clobber the
2948 trace buffer that had been set for a new run.
2949
2950 - we can't rewrite/reuse the jump pads for new tracepoints
2951 safely. Say you do tstart while a thread is stopped midway while
2952 collecting. When the thread is later resumed, it finishes the
2953 collection, and returns to the jump pad, to execute the original
2954 instruction that was under the tracepoint jump at the time the
2955 older run had been started. If the jump pad had been rewritten
2956 since for something else in the new run, the thread would now
2957 execute the wrong / random instructions. */
2958
2959 static void
2960 linux_stabilize_threads (void)
2961 {
2962 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2963
2964 if (thread_stuck != NULL)
2965 {
2966 if (debug_threads)
2967 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2968 lwpid_of (thread_stuck));
2969 return;
2970 }
2971
2972 thread_info *saved_thread = current_thread;
2973
2974 stabilizing_threads = 1;
2975
2976 /* Kick 'em all. */
2977 for_each_thread (move_out_of_jump_pad_callback);
2978
2979 /* Loop until all are stopped out of the jump pads. */
2980 while (find_thread (lwp_running) != NULL)
2981 {
2982 struct target_waitstatus ourstatus;
2983 struct lwp_info *lwp;
2984 int wstat;
2985
2986 /* Note that we go through the full wait even loop. While
2987 moving threads out of jump pad, we need to be able to step
2988 over internal breakpoints and such. */
2989 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2990
2991 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2992 {
2993 lwp = get_thread_lwp (current_thread);
2994
2995 /* Lock it. */
2996 lwp_suspended_inc (lwp);
2997
2998 if (ourstatus.value.sig != GDB_SIGNAL_0
2999 || current_thread->last_resume_kind == resume_stop)
3000 {
3001 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3002 enqueue_one_deferred_signal (lwp, &wstat);
3003 }
3004 }
3005 }
3006
3007 unsuspend_all_lwps (NULL);
3008
3009 stabilizing_threads = 0;
3010
3011 current_thread = saved_thread;
3012
3013 if (debug_threads)
3014 {
3015 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3016
3017 if (thread_stuck != NULL)
3018 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3019 lwpid_of (thread_stuck));
3020 }
3021 }
3022
3023 /* Convenience function that is called when the kernel reports an
3024 event that is not passed out to GDB. */
3025
3026 static ptid_t
3027 ignore_event (struct target_waitstatus *ourstatus)
3028 {
3029 /* If we got an event, there may still be others, as a single
3030 SIGCHLD can indicate more than one child stopped. This forces
3031 another target_wait call. */
3032 async_file_mark ();
3033
3034 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3035 return null_ptid;
3036 }
3037
3038 /* Convenience function that is called when the kernel reports an exit
3039 event. This decides whether to report the event to GDB as a
3040 process exit event, a thread exit event, or to suppress the
3041 event. */
3042
3043 static ptid_t
3044 filter_exit_event (struct lwp_info *event_child,
3045 struct target_waitstatus *ourstatus)
3046 {
3047 client_state &cs = get_client_state ();
3048 struct thread_info *thread = get_lwp_thread (event_child);
3049 ptid_t ptid = ptid_of (thread);
3050
3051 if (!last_thread_of_process_p (pid_of (thread)))
3052 {
3053 if (cs.report_thread_events)
3054 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3055 else
3056 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3057
3058 delete_lwp (event_child);
3059 }
3060 return ptid;
3061 }
3062
3063 /* Returns 1 if GDB is interested in any event_child syscalls. */
3064
3065 static int
3066 gdb_catching_syscalls_p (struct lwp_info *event_child)
3067 {
3068 struct thread_info *thread = get_lwp_thread (event_child);
3069 struct process_info *proc = get_thread_process (thread);
3070
3071 return !proc->syscalls_to_catch.empty ();
3072 }
3073
3074 /* Returns 1 if GDB is interested in the event_child syscall.
3075 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3076
3077 static int
3078 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3079 {
3080 int sysno;
3081 struct thread_info *thread = get_lwp_thread (event_child);
3082 struct process_info *proc = get_thread_process (thread);
3083
3084 if (proc->syscalls_to_catch.empty ())
3085 return 0;
3086
3087 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3088 return 1;
3089
3090 get_syscall_trapinfo (event_child, &sysno);
3091
3092 for (int iter : proc->syscalls_to_catch)
3093 if (iter == sysno)
3094 return 1;
3095
3096 return 0;
3097 }
3098
3099 /* Wait for process, returns status. */
3100
3101 static ptid_t
3102 linux_wait_1 (ptid_t ptid,
3103 struct target_waitstatus *ourstatus, int target_options)
3104 {
3105 client_state &cs = get_client_state ();
3106 int w;
3107 struct lwp_info *event_child;
3108 int options;
3109 int pid;
3110 int step_over_finished;
3111 int bp_explains_trap;
3112 int maybe_internal_trap;
3113 int report_to_gdb;
3114 int trace_event;
3115 int in_step_range;
3116 int any_resumed;
3117
3118 if (debug_threads)
3119 {
3120 debug_enter ();
3121 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3122 }
3123
3124 /* Translate generic target options into linux options. */
3125 options = __WALL;
3126 if (target_options & TARGET_WNOHANG)
3127 options |= WNOHANG;
3128
3129 bp_explains_trap = 0;
3130 trace_event = 0;
3131 in_step_range = 0;
3132 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3133
3134 auto status_pending_p_any = [&] (thread_info *thread)
3135 {
3136 return status_pending_p_callback (thread, minus_one_ptid);
3137 };
3138
3139 auto not_stopped = [&] (thread_info *thread)
3140 {
3141 return not_stopped_callback (thread, minus_one_ptid);
3142 };
3143
3144 /* Find a resumed LWP, if any. */
3145 if (find_thread (status_pending_p_any) != NULL)
3146 any_resumed = 1;
3147 else if (find_thread (not_stopped) != NULL)
3148 any_resumed = 1;
3149 else
3150 any_resumed = 0;
3151
3152 if (step_over_bkpt == null_ptid)
3153 pid = linux_wait_for_event (ptid, &w, options);
3154 else
3155 {
3156 if (debug_threads)
3157 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3158 target_pid_to_str (step_over_bkpt));
3159 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3160 }
3161
3162 if (pid == 0 || (pid == -1 && !any_resumed))
3163 {
3164 gdb_assert (target_options & TARGET_WNOHANG);
3165
3166 if (debug_threads)
3167 {
3168 debug_printf ("linux_wait_1 ret = null_ptid, "
3169 "TARGET_WAITKIND_IGNORE\n");
3170 debug_exit ();
3171 }
3172
3173 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3174 return null_ptid;
3175 }
3176 else if (pid == -1)
3177 {
3178 if (debug_threads)
3179 {
3180 debug_printf ("linux_wait_1 ret = null_ptid, "
3181 "TARGET_WAITKIND_NO_RESUMED\n");
3182 debug_exit ();
3183 }
3184
3185 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3186 return null_ptid;
3187 }
3188
3189 event_child = get_thread_lwp (current_thread);
3190
3191 /* linux_wait_for_event only returns an exit status for the last
3192 child of a process. Report it. */
3193 if (WIFEXITED (w) || WIFSIGNALED (w))
3194 {
3195 if (WIFEXITED (w))
3196 {
3197 ourstatus->kind = TARGET_WAITKIND_EXITED;
3198 ourstatus->value.integer = WEXITSTATUS (w);
3199
3200 if (debug_threads)
3201 {
3202 debug_printf ("linux_wait_1 ret = %s, exited with "
3203 "retcode %d\n",
3204 target_pid_to_str (ptid_of (current_thread)),
3205 WEXITSTATUS (w));
3206 debug_exit ();
3207 }
3208 }
3209 else
3210 {
3211 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3212 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3213
3214 if (debug_threads)
3215 {
3216 debug_printf ("linux_wait_1 ret = %s, terminated with "
3217 "signal %d\n",
3218 target_pid_to_str (ptid_of (current_thread)),
3219 WTERMSIG (w));
3220 debug_exit ();
3221 }
3222 }
3223
3224 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3225 return filter_exit_event (event_child, ourstatus);
3226
3227 return ptid_of (current_thread);
3228 }
3229
3230 /* If step-over executes a breakpoint instruction, in the case of a
3231 hardware single step it means a gdb/gdbserver breakpoint had been
3232 planted on top of a permanent breakpoint, in the case of a software
3233 single step it may just mean that gdbserver hit the reinsert breakpoint.
3234 The PC has been adjusted by save_stop_reason to point at
3235 the breakpoint address.
3236 So in the case of the hardware single step advance the PC manually
3237 past the breakpoint and in the case of software single step advance only
3238 if it's not the single_step_breakpoint we are hitting.
3239 This avoids that a program would keep trapping a permanent breakpoint
3240 forever. */
3241 if (step_over_bkpt != null_ptid
3242 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3243 && (event_child->stepping
3244 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3245 {
3246 int increment_pc = 0;
3247 int breakpoint_kind = 0;
3248 CORE_ADDR stop_pc = event_child->stop_pc;
3249
3250 breakpoint_kind =
3251 the_target->breakpoint_kind_from_current_state (&stop_pc);
3252 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3253
3254 if (debug_threads)
3255 {
3256 debug_printf ("step-over for %s executed software breakpoint\n",
3257 target_pid_to_str (ptid_of (current_thread)));
3258 }
3259
3260 if (increment_pc != 0)
3261 {
3262 struct regcache *regcache
3263 = get_thread_regcache (current_thread, 1);
3264
3265 event_child->stop_pc += increment_pc;
3266 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3267
3268 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3269 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3270 }
3271 }
3272
3273 /* If this event was not handled before, and is not a SIGTRAP, we
3274 report it. SIGILL and SIGSEGV are also treated as traps in case
3275 a breakpoint is inserted at the current PC. If this target does
3276 not support internal breakpoints at all, we also report the
3277 SIGTRAP without further processing; it's of no concern to us. */
3278 maybe_internal_trap
3279 = (supports_breakpoints ()
3280 && (WSTOPSIG (w) == SIGTRAP
3281 || ((WSTOPSIG (w) == SIGILL
3282 || WSTOPSIG (w) == SIGSEGV)
3283 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3284
3285 if (maybe_internal_trap)
3286 {
3287 /* Handle anything that requires bookkeeping before deciding to
3288 report the event or continue waiting. */
3289
3290 /* First check if we can explain the SIGTRAP with an internal
3291 breakpoint, or if we should possibly report the event to GDB.
3292 Do this before anything that may remove or insert a
3293 breakpoint. */
3294 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3295
3296 /* We have a SIGTRAP, possibly a step-over dance has just
3297 finished. If so, tweak the state machine accordingly,
3298 reinsert breakpoints and delete any single-step
3299 breakpoints. */
3300 step_over_finished = finish_step_over (event_child);
3301
3302 /* Now invoke the callbacks of any internal breakpoints there. */
3303 check_breakpoints (event_child->stop_pc);
3304
3305 /* Handle tracepoint data collecting. This may overflow the
3306 trace buffer, and cause a tracing stop, removing
3307 breakpoints. */
3308 trace_event = handle_tracepoints (event_child);
3309
3310 if (bp_explains_trap)
3311 {
3312 if (debug_threads)
3313 debug_printf ("Hit a gdbserver breakpoint.\n");
3314 }
3315 }
3316 else
3317 {
3318 /* We have some other signal, possibly a step-over dance was in
3319 progress, and it should be cancelled too. */
3320 step_over_finished = finish_step_over (event_child);
3321 }
3322
3323 /* We have all the data we need. Either report the event to GDB, or
3324 resume threads and keep waiting for more. */
3325
3326 /* If we're collecting a fast tracepoint, finish the collection and
3327 move out of the jump pad before delivering a signal. See
3328 linux_stabilize_threads. */
3329
3330 if (WIFSTOPPED (w)
3331 && WSTOPSIG (w) != SIGTRAP
3332 && supports_fast_tracepoints ()
3333 && agent_loaded_p ())
3334 {
3335 if (debug_threads)
3336 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3337 "to defer or adjust it.\n",
3338 WSTOPSIG (w), lwpid_of (current_thread));
3339
3340 /* Allow debugging the jump pad itself. */
3341 if (current_thread->last_resume_kind != resume_step
3342 && maybe_move_out_of_jump_pad (event_child, &w))
3343 {
3344 enqueue_one_deferred_signal (event_child, &w);
3345
3346 if (debug_threads)
3347 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3348 WSTOPSIG (w), lwpid_of (current_thread));
3349
3350 linux_resume_one_lwp (event_child, 0, 0, NULL);
3351
3352 if (debug_threads)
3353 debug_exit ();
3354 return ignore_event (ourstatus);
3355 }
3356 }
3357
3358 if (event_child->collecting_fast_tracepoint
3359 != fast_tpoint_collect_result::not_collecting)
3360 {
3361 if (debug_threads)
3362 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3363 "Check if we're already there.\n",
3364 lwpid_of (current_thread),
3365 (int) event_child->collecting_fast_tracepoint);
3366
3367 trace_event = 1;
3368
3369 event_child->collecting_fast_tracepoint
3370 = linux_fast_tracepoint_collecting (event_child, NULL);
3371
3372 if (event_child->collecting_fast_tracepoint
3373 != fast_tpoint_collect_result::before_insn)
3374 {
3375 /* No longer need this breakpoint. */
3376 if (event_child->exit_jump_pad_bkpt != NULL)
3377 {
3378 if (debug_threads)
3379 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3380 "stopping all threads momentarily.\n");
3381
3382 /* Other running threads could hit this breakpoint.
3383 We don't handle moribund locations like GDB does,
3384 instead we always pause all threads when removing
3385 breakpoints, so that any step-over or
3386 decr_pc_after_break adjustment is always taken
3387 care of while the breakpoint is still
3388 inserted. */
3389 stop_all_lwps (1, event_child);
3390
3391 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3392 event_child->exit_jump_pad_bkpt = NULL;
3393
3394 unstop_all_lwps (1, event_child);
3395
3396 gdb_assert (event_child->suspended >= 0);
3397 }
3398 }
3399
3400 if (event_child->collecting_fast_tracepoint
3401 == fast_tpoint_collect_result::not_collecting)
3402 {
3403 if (debug_threads)
3404 debug_printf ("fast tracepoint finished "
3405 "collecting successfully.\n");
3406
3407 /* We may have a deferred signal to report. */
3408 if (dequeue_one_deferred_signal (event_child, &w))
3409 {
3410 if (debug_threads)
3411 debug_printf ("dequeued one signal.\n");
3412 }
3413 else
3414 {
3415 if (debug_threads)
3416 debug_printf ("no deferred signals.\n");
3417
3418 if (stabilizing_threads)
3419 {
3420 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3421 ourstatus->value.sig = GDB_SIGNAL_0;
3422
3423 if (debug_threads)
3424 {
3425 debug_printf ("linux_wait_1 ret = %s, stopped "
3426 "while stabilizing threads\n",
3427 target_pid_to_str (ptid_of (current_thread)));
3428 debug_exit ();
3429 }
3430
3431 return ptid_of (current_thread);
3432 }
3433 }
3434 }
3435 }
3436
3437 /* Check whether GDB would be interested in this event. */
3438
3439 /* Check if GDB is interested in this syscall. */
3440 if (WIFSTOPPED (w)
3441 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3442 && !gdb_catch_this_syscall_p (event_child))
3443 {
3444 if (debug_threads)
3445 {
3446 debug_printf ("Ignored syscall for LWP %ld.\n",
3447 lwpid_of (current_thread));
3448 }
3449
3450 linux_resume_one_lwp (event_child, event_child->stepping,
3451 0, NULL);
3452
3453 if (debug_threads)
3454 debug_exit ();
3455 return ignore_event (ourstatus);
3456 }
3457
3458 /* If GDB is not interested in this signal, don't stop other
3459 threads, and don't report it to GDB. Just resume the inferior
3460 right away. We do this for threading-related signals as well as
3461 any that GDB specifically requested we ignore. But never ignore
3462 SIGSTOP if we sent it ourselves, and do not ignore signals when
3463 stepping - they may require special handling to skip the signal
3464 handler. Also never ignore signals that could be caused by a
3465 breakpoint. */
3466 if (WIFSTOPPED (w)
3467 && current_thread->last_resume_kind != resume_step
3468 && (
3469 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3470 (current_process ()->priv->thread_db != NULL
3471 && (WSTOPSIG (w) == __SIGRTMIN
3472 || WSTOPSIG (w) == __SIGRTMIN + 1))
3473 ||
3474 #endif
3475 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3476 && !(WSTOPSIG (w) == SIGSTOP
3477 && current_thread->last_resume_kind == resume_stop)
3478 && !linux_wstatus_maybe_breakpoint (w))))
3479 {
3480 siginfo_t info, *info_p;
3481
3482 if (debug_threads)
3483 debug_printf ("Ignored signal %d for LWP %ld.\n",
3484 WSTOPSIG (w), lwpid_of (current_thread));
3485
3486 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3487 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3488 info_p = &info;
3489 else
3490 info_p = NULL;
3491
3492 if (step_over_finished)
3493 {
3494 /* We cancelled this thread's step-over above. We still
3495 need to unsuspend all other LWPs, and set them back
3496 running again while the signal handler runs. */
3497 unsuspend_all_lwps (event_child);
3498
3499 /* Enqueue the pending signal info so that proceed_all_lwps
3500 doesn't lose it. */
3501 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3502
3503 proceed_all_lwps ();
3504 }
3505 else
3506 {
3507 linux_resume_one_lwp (event_child, event_child->stepping,
3508 WSTOPSIG (w), info_p);
3509 }
3510
3511 if (debug_threads)
3512 debug_exit ();
3513
3514 return ignore_event (ourstatus);
3515 }
3516
3517 /* Note that all addresses are always "out of the step range" when
3518 there's no range to begin with. */
3519 in_step_range = lwp_in_step_range (event_child);
3520
3521 /* If GDB wanted this thread to single step, and the thread is out
3522 of the step range, we always want to report the SIGTRAP, and let
3523 GDB handle it. Watchpoints should always be reported. So should
3524 signals we can't explain. A SIGTRAP we can't explain could be a
3525 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3526 do, we're be able to handle GDB breakpoints on top of internal
3527 breakpoints, by handling the internal breakpoint and still
3528 reporting the event to GDB. If we don't, we're out of luck, GDB
3529 won't see the breakpoint hit. If we see a single-step event but
3530 the thread should be continuing, don't pass the trap to gdb.
3531 That indicates that we had previously finished a single-step but
3532 left the single-step pending -- see
3533 complete_ongoing_step_over. */
3534 report_to_gdb = (!maybe_internal_trap
3535 || (current_thread->last_resume_kind == resume_step
3536 && !in_step_range)
3537 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3538 || (!in_step_range
3539 && !bp_explains_trap
3540 && !trace_event
3541 && !step_over_finished
3542 && !(current_thread->last_resume_kind == resume_continue
3543 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3544 || (gdb_breakpoint_here (event_child->stop_pc)
3545 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3546 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3547 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3548
3549 run_breakpoint_commands (event_child->stop_pc);
3550
3551 /* We found no reason GDB would want us to stop. We either hit one
3552 of our own breakpoints, or finished an internal step GDB
3553 shouldn't know about. */
3554 if (!report_to_gdb)
3555 {
3556 if (debug_threads)
3557 {
3558 if (bp_explains_trap)
3559 debug_printf ("Hit a gdbserver breakpoint.\n");
3560 if (step_over_finished)
3561 debug_printf ("Step-over finished.\n");
3562 if (trace_event)
3563 debug_printf ("Tracepoint event.\n");
3564 if (lwp_in_step_range (event_child))
3565 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3566 paddress (event_child->stop_pc),
3567 paddress (event_child->step_range_start),
3568 paddress (event_child->step_range_end));
3569 }
3570
3571 /* We're not reporting this breakpoint to GDB, so apply the
3572 decr_pc_after_break adjustment to the inferior's regcache
3573 ourselves. */
3574
3575 if (the_low_target.set_pc != NULL)
3576 {
3577 struct regcache *regcache
3578 = get_thread_regcache (current_thread, 1);
3579 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3580 }
3581
3582 if (step_over_finished)
3583 {
3584 /* If we have finished stepping over a breakpoint, we've
3585 stopped and suspended all LWPs momentarily except the
3586 stepping one. This is where we resume them all again.
3587 We're going to keep waiting, so use proceed, which
3588 handles stepping over the next breakpoint. */
3589 unsuspend_all_lwps (event_child);
3590 }
3591 else
3592 {
3593 /* Remove the single-step breakpoints if any. Note that
3594 there isn't single-step breakpoint if we finished stepping
3595 over. */
3596 if (can_software_single_step ()
3597 && has_single_step_breakpoints (current_thread))
3598 {
3599 stop_all_lwps (0, event_child);
3600 delete_single_step_breakpoints (current_thread);
3601 unstop_all_lwps (0, event_child);
3602 }
3603 }
3604
3605 if (debug_threads)
3606 debug_printf ("proceeding all threads.\n");
3607 proceed_all_lwps ();
3608
3609 if (debug_threads)
3610 debug_exit ();
3611
3612 return ignore_event (ourstatus);
3613 }
3614
3615 if (debug_threads)
3616 {
3617 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3618 {
3619 std::string str
3620 = target_waitstatus_to_string (&event_child->waitstatus);
3621
3622 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3623 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3624 }
3625 if (current_thread->last_resume_kind == resume_step)
3626 {
3627 if (event_child->step_range_start == event_child->step_range_end)
3628 debug_printf ("GDB wanted to single-step, reporting event.\n");
3629 else if (!lwp_in_step_range (event_child))
3630 debug_printf ("Out of step range, reporting event.\n");
3631 }
3632 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3633 debug_printf ("Stopped by watchpoint.\n");
3634 else if (gdb_breakpoint_here (event_child->stop_pc))
3635 debug_printf ("Stopped by GDB breakpoint.\n");
3636 if (debug_threads)
3637 debug_printf ("Hit a non-gdbserver trap event.\n");
3638 }
3639
3640 /* Alright, we're going to report a stop. */
3641
3642 /* Remove single-step breakpoints. */
3643 if (can_software_single_step ())
3644 {
3645 /* Remove single-step breakpoints or not. It it is true, stop all
3646 lwps, so that other threads won't hit the breakpoint in the
3647 staled memory. */
3648 int remove_single_step_breakpoints_p = 0;
3649
3650 if (non_stop)
3651 {
3652 remove_single_step_breakpoints_p
3653 = has_single_step_breakpoints (current_thread);
3654 }
3655 else
3656 {
3657 /* In all-stop, a stop reply cancels all previous resume
3658 requests. Delete all single-step breakpoints. */
3659
3660 find_thread ([&] (thread_info *thread) {
3661 if (has_single_step_breakpoints (thread))
3662 {
3663 remove_single_step_breakpoints_p = 1;
3664 return true;
3665 }
3666
3667 return false;
3668 });
3669 }
3670
3671 if (remove_single_step_breakpoints_p)
3672 {
3673 /* If we remove single-step breakpoints from memory, stop all lwps,
3674 so that other threads won't hit the breakpoint in the staled
3675 memory. */
3676 stop_all_lwps (0, event_child);
3677
3678 if (non_stop)
3679 {
3680 gdb_assert (has_single_step_breakpoints (current_thread));
3681 delete_single_step_breakpoints (current_thread);
3682 }
3683 else
3684 {
3685 for_each_thread ([] (thread_info *thread){
3686 if (has_single_step_breakpoints (thread))
3687 delete_single_step_breakpoints (thread);
3688 });
3689 }
3690
3691 unstop_all_lwps (0, event_child);
3692 }
3693 }
3694
3695 if (!stabilizing_threads)
3696 {
3697 /* In all-stop, stop all threads. */
3698 if (!non_stop)
3699 stop_all_lwps (0, NULL);
3700
3701 if (step_over_finished)
3702 {
3703 if (!non_stop)
3704 {
3705 /* If we were doing a step-over, all other threads but
3706 the stepping one had been paused in start_step_over,
3707 with their suspend counts incremented. We don't want
3708 to do a full unstop/unpause, because we're in
3709 all-stop mode (so we want threads stopped), but we
3710 still need to unsuspend the other threads, to
3711 decrement their `suspended' count back. */
3712 unsuspend_all_lwps (event_child);
3713 }
3714 else
3715 {
3716 /* If we just finished a step-over, then all threads had
3717 been momentarily paused. In all-stop, that's fine,
3718 we want threads stopped by now anyway. In non-stop,
3719 we need to re-resume threads that GDB wanted to be
3720 running. */
3721 unstop_all_lwps (1, event_child);
3722 }
3723 }
3724
3725 /* If we're not waiting for a specific LWP, choose an event LWP
3726 from among those that have had events. Giving equal priority
3727 to all LWPs that have had events helps prevent
3728 starvation. */
3729 if (ptid == minus_one_ptid)
3730 {
3731 event_child->status_pending_p = 1;
3732 event_child->status_pending = w;
3733
3734 select_event_lwp (&event_child);
3735
3736 /* current_thread and event_child must stay in sync. */
3737 current_thread = get_lwp_thread (event_child);
3738
3739 event_child->status_pending_p = 0;
3740 w = event_child->status_pending;
3741 }
3742
3743
3744 /* Stabilize threads (move out of jump pads). */
3745 if (!non_stop)
3746 stabilize_threads ();
3747 }
3748 else
3749 {
3750 /* If we just finished a step-over, then all threads had been
3751 momentarily paused. In all-stop, that's fine, we want
3752 threads stopped by now anyway. In non-stop, we need to
3753 re-resume threads that GDB wanted to be running. */
3754 if (step_over_finished)
3755 unstop_all_lwps (1, event_child);
3756 }
3757
3758 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3759 {
3760 /* If the reported event is an exit, fork, vfork or exec, let
3761 GDB know. */
3762
3763 /* Break the unreported fork relationship chain. */
3764 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3765 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3766 {
3767 event_child->fork_relative->fork_relative = NULL;
3768 event_child->fork_relative = NULL;
3769 }
3770
3771 *ourstatus = event_child->waitstatus;
3772 /* Clear the event lwp's waitstatus since we handled it already. */
3773 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3774 }
3775 else
3776 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3777
3778 /* Now that we've selected our final event LWP, un-adjust its PC if
3779 it was a software breakpoint, and the client doesn't know we can
3780 adjust the breakpoint ourselves. */
3781 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3782 && !cs.swbreak_feature)
3783 {
3784 int decr_pc = the_low_target.decr_pc_after_break;
3785
3786 if (decr_pc != 0)
3787 {
3788 struct regcache *regcache
3789 = get_thread_regcache (current_thread, 1);
3790 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3791 }
3792 }
3793
3794 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3795 {
3796 get_syscall_trapinfo (event_child,
3797 &ourstatus->value.syscall_number);
3798 ourstatus->kind = event_child->syscall_state;
3799 }
3800 else if (current_thread->last_resume_kind == resume_stop
3801 && WSTOPSIG (w) == SIGSTOP)
3802 {
3803 /* A thread that has been requested to stop by GDB with vCont;t,
3804 and it stopped cleanly, so report as SIG0. The use of
3805 SIGSTOP is an implementation detail. */
3806 ourstatus->value.sig = GDB_SIGNAL_0;
3807 }
3808 else if (current_thread->last_resume_kind == resume_stop
3809 && WSTOPSIG (w) != SIGSTOP)
3810 {
3811 /* A thread that has been requested to stop by GDB with vCont;t,
3812 but, it stopped for other reasons. */
3813 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3814 }
3815 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3816 {
3817 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3818 }
3819
3820 gdb_assert (step_over_bkpt == null_ptid);
3821
3822 if (debug_threads)
3823 {
3824 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3825 target_pid_to_str (ptid_of (current_thread)),
3826 ourstatus->kind, ourstatus->value.sig);
3827 debug_exit ();
3828 }
3829
3830 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3831 return filter_exit_event (event_child, ourstatus);
3832
3833 return ptid_of (current_thread);
3834 }
3835
3836 /* Get rid of any pending event in the pipe. */
3837 static void
3838 async_file_flush (void)
3839 {
3840 int ret;
3841 char buf;
3842
3843 do
3844 ret = read (linux_event_pipe[0], &buf, 1);
3845 while (ret >= 0 || (ret == -1 && errno == EINTR));
3846 }
3847
3848 /* Put something in the pipe, so the event loop wakes up. */
3849 static void
3850 async_file_mark (void)
3851 {
3852 int ret;
3853
3854 async_file_flush ();
3855
3856 do
3857 ret = write (linux_event_pipe[1], "+", 1);
3858 while (ret == 0 || (ret == -1 && errno == EINTR));
3859
3860 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3861 be awakened anyway. */
3862 }
3863
3864 static ptid_t
3865 linux_wait (ptid_t ptid,
3866 struct target_waitstatus *ourstatus, int target_options)
3867 {
3868 ptid_t event_ptid;
3869
3870 /* Flush the async file first. */
3871 if (target_is_async_p ())
3872 async_file_flush ();
3873
3874 do
3875 {
3876 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3877 }
3878 while ((target_options & TARGET_WNOHANG) == 0
3879 && event_ptid == null_ptid
3880 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3881
3882 /* If at least one stop was reported, there may be more. A single
3883 SIGCHLD can signal more than one child stop. */
3884 if (target_is_async_p ()
3885 && (target_options & TARGET_WNOHANG) != 0
3886 && event_ptid != null_ptid)
3887 async_file_mark ();
3888
3889 return event_ptid;
3890 }
3891
3892 /* Send a signal to an LWP. */
3893
3894 static int
3895 kill_lwp (unsigned long lwpid, int signo)
3896 {
3897 int ret;
3898
3899 errno = 0;
3900 ret = syscall (__NR_tkill, lwpid, signo);
3901 if (errno == ENOSYS)
3902 {
3903 /* If tkill fails, then we are not using nptl threads, a
3904 configuration we no longer support. */
3905 perror_with_name (("tkill"));
3906 }
3907 return ret;
3908 }
3909
3910 void
3911 linux_stop_lwp (struct lwp_info *lwp)
3912 {
3913 send_sigstop (lwp);
3914 }
3915
3916 static void
3917 send_sigstop (struct lwp_info *lwp)
3918 {
3919 int pid;
3920
3921 pid = lwpid_of (get_lwp_thread (lwp));
3922
3923 /* If we already have a pending stop signal for this process, don't
3924 send another. */
3925 if (lwp->stop_expected)
3926 {
3927 if (debug_threads)
3928 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3929
3930 return;
3931 }
3932
3933 if (debug_threads)
3934 debug_printf ("Sending sigstop to lwp %d\n", pid);
3935
3936 lwp->stop_expected = 1;
3937 kill_lwp (pid, SIGSTOP);
3938 }
3939
3940 static void
3941 send_sigstop (thread_info *thread, lwp_info *except)
3942 {
3943 struct lwp_info *lwp = get_thread_lwp (thread);
3944
3945 /* Ignore EXCEPT. */
3946 if (lwp == except)
3947 return;
3948
3949 if (lwp->stopped)
3950 return;
3951
3952 send_sigstop (lwp);
3953 }
3954
3955 /* Increment the suspend count of an LWP, and stop it, if not stopped
3956 yet. */
3957 static void
3958 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3959 {
3960 struct lwp_info *lwp = get_thread_lwp (thread);
3961
3962 /* Ignore EXCEPT. */
3963 if (lwp == except)
3964 return;
3965
3966 lwp_suspended_inc (lwp);
3967
3968 send_sigstop (thread, except);
3969 }
3970
3971 static void
3972 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3973 {
3974 /* Store the exit status for later. */
3975 lwp->status_pending_p = 1;
3976 lwp->status_pending = wstat;
3977
3978 /* Store in waitstatus as well, as there's nothing else to process
3979 for this event. */
3980 if (WIFEXITED (wstat))
3981 {
3982 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3983 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3984 }
3985 else if (WIFSIGNALED (wstat))
3986 {
3987 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3988 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3989 }
3990
3991 /* Prevent trying to stop it. */
3992 lwp->stopped = 1;
3993
3994 /* No further stops are expected from a dead lwp. */
3995 lwp->stop_expected = 0;
3996 }
3997
3998 /* Return true if LWP has exited already, and has a pending exit event
3999 to report to GDB. */
4000
4001 static int
4002 lwp_is_marked_dead (struct lwp_info *lwp)
4003 {
4004 return (lwp->status_pending_p
4005 && (WIFEXITED (lwp->status_pending)
4006 || WIFSIGNALED (lwp->status_pending)));
4007 }
4008
4009 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4010
4011 static void
4012 wait_for_sigstop (void)
4013 {
4014 struct thread_info *saved_thread;
4015 ptid_t saved_tid;
4016 int wstat;
4017 int ret;
4018
4019 saved_thread = current_thread;
4020 if (saved_thread != NULL)
4021 saved_tid = saved_thread->id;
4022 else
4023 saved_tid = null_ptid; /* avoid bogus unused warning */
4024
4025 if (debug_threads)
4026 debug_printf ("wait_for_sigstop: pulling events\n");
4027
4028 /* Passing NULL_PTID as filter indicates we want all events to be
4029 left pending. Eventually this returns when there are no
4030 unwaited-for children left. */
4031 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4032 &wstat, __WALL);
4033 gdb_assert (ret == -1);
4034
4035 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4036 current_thread = saved_thread;
4037 else
4038 {
4039 if (debug_threads)
4040 debug_printf ("Previously current thread died.\n");
4041
4042 /* We can't change the current inferior behind GDB's back,
4043 otherwise, a subsequent command may apply to the wrong
4044 process. */
4045 current_thread = NULL;
4046 }
4047 }
4048
4049 /* Returns true if THREAD is stopped in a jump pad, and we can't
4050 move it out, because we need to report the stop event to GDB. For
4051 example, if the user puts a breakpoint in the jump pad, it's
4052 because she wants to debug it. */
4053
4054 static bool
4055 stuck_in_jump_pad_callback (thread_info *thread)
4056 {
4057 struct lwp_info *lwp = get_thread_lwp (thread);
4058
4059 if (lwp->suspended != 0)
4060 {
4061 internal_error (__FILE__, __LINE__,
4062 "LWP %ld is suspended, suspended=%d\n",
4063 lwpid_of (thread), lwp->suspended);
4064 }
4065 gdb_assert (lwp->stopped);
4066
4067 /* Allow debugging the jump pad, gdb_collect, etc.. */
4068 return (supports_fast_tracepoints ()
4069 && agent_loaded_p ()
4070 && (gdb_breakpoint_here (lwp->stop_pc)
4071 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4072 || thread->last_resume_kind == resume_step)
4073 && (linux_fast_tracepoint_collecting (lwp, NULL)
4074 != fast_tpoint_collect_result::not_collecting));
4075 }
4076
4077 static void
4078 move_out_of_jump_pad_callback (thread_info *thread)
4079 {
4080 struct thread_info *saved_thread;
4081 struct lwp_info *lwp = get_thread_lwp (thread);
4082 int *wstat;
4083
4084 if (lwp->suspended != 0)
4085 {
4086 internal_error (__FILE__, __LINE__,
4087 "LWP %ld is suspended, suspended=%d\n",
4088 lwpid_of (thread), lwp->suspended);
4089 }
4090 gdb_assert (lwp->stopped);
4091
4092 /* For gdb_breakpoint_here. */
4093 saved_thread = current_thread;
4094 current_thread = thread;
4095
4096 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4097
4098 /* Allow debugging the jump pad, gdb_collect, etc. */
4099 if (!gdb_breakpoint_here (lwp->stop_pc)
4100 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4101 && thread->last_resume_kind != resume_step
4102 && maybe_move_out_of_jump_pad (lwp, wstat))
4103 {
4104 if (debug_threads)
4105 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4106 lwpid_of (thread));
4107
4108 if (wstat)
4109 {
4110 lwp->status_pending_p = 0;
4111 enqueue_one_deferred_signal (lwp, wstat);
4112
4113 if (debug_threads)
4114 debug_printf ("Signal %d for LWP %ld deferred "
4115 "(in jump pad)\n",
4116 WSTOPSIG (*wstat), lwpid_of (thread));
4117 }
4118
4119 linux_resume_one_lwp (lwp, 0, 0, NULL);
4120 }
4121 else
4122 lwp_suspended_inc (lwp);
4123
4124 current_thread = saved_thread;
4125 }
4126
4127 static bool
4128 lwp_running (thread_info *thread)
4129 {
4130 struct lwp_info *lwp = get_thread_lwp (thread);
4131
4132 if (lwp_is_marked_dead (lwp))
4133 return false;
4134
4135 return !lwp->stopped;
4136 }
4137
4138 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4139 If SUSPEND, then also increase the suspend count of every LWP,
4140 except EXCEPT. */
4141
4142 static void
4143 stop_all_lwps (int suspend, struct lwp_info *except)
4144 {
4145 /* Should not be called recursively. */
4146 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4147
4148 if (debug_threads)
4149 {
4150 debug_enter ();
4151 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4152 suspend ? "stop-and-suspend" : "stop",
4153 except != NULL
4154 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4155 : "none");
4156 }
4157
4158 stopping_threads = (suspend
4159 ? STOPPING_AND_SUSPENDING_THREADS
4160 : STOPPING_THREADS);
4161
4162 if (suspend)
4163 for_each_thread ([&] (thread_info *thread)
4164 {
4165 suspend_and_send_sigstop (thread, except);
4166 });
4167 else
4168 for_each_thread ([&] (thread_info *thread)
4169 {
4170 send_sigstop (thread, except);
4171 });
4172
4173 wait_for_sigstop ();
4174 stopping_threads = NOT_STOPPING_THREADS;
4175
4176 if (debug_threads)
4177 {
4178 debug_printf ("stop_all_lwps done, setting stopping_threads "
4179 "back to !stopping\n");
4180 debug_exit ();
4181 }
4182 }
4183
4184 /* Enqueue one signal in the chain of signals which need to be
4185 delivered to this process on next resume. */
4186
4187 static void
4188 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4189 {
4190 struct pending_signals *p_sig = XNEW (struct pending_signals);
4191
4192 p_sig->prev = lwp->pending_signals;
4193 p_sig->signal = signal;
4194 if (info == NULL)
4195 memset (&p_sig->info, 0, sizeof (siginfo_t));
4196 else
4197 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4198 lwp->pending_signals = p_sig;
4199 }
4200
4201 /* Install breakpoints for software single stepping. */
4202
4203 static void
4204 install_software_single_step_breakpoints (struct lwp_info *lwp)
4205 {
4206 struct thread_info *thread = get_lwp_thread (lwp);
4207 struct regcache *regcache = get_thread_regcache (thread, 1);
4208
4209 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4210
4211 current_thread = thread;
4212 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4213
4214 for (CORE_ADDR pc : next_pcs)
4215 set_single_step_breakpoint (pc, current_ptid);
4216 }
4217
4218 /* Single step via hardware or software single step.
4219 Return 1 if hardware single stepping, 0 if software single stepping
4220 or can't single step. */
4221
4222 static int
4223 single_step (struct lwp_info* lwp)
4224 {
4225 int step = 0;
4226
4227 if (can_hardware_single_step ())
4228 {
4229 step = 1;
4230 }
4231 else if (can_software_single_step ())
4232 {
4233 install_software_single_step_breakpoints (lwp);
4234 step = 0;
4235 }
4236 else
4237 {
4238 if (debug_threads)
4239 debug_printf ("stepping is not implemented on this target");
4240 }
4241
4242 return step;
4243 }
4244
4245 /* The signal can be delivered to the inferior if we are not trying to
4246 finish a fast tracepoint collect. Since signal can be delivered in
4247 the step-over, the program may go to signal handler and trap again
4248 after return from the signal handler. We can live with the spurious
4249 double traps. */
4250
4251 static int
4252 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4253 {
4254 return (lwp->collecting_fast_tracepoint
4255 == fast_tpoint_collect_result::not_collecting);
4256 }
4257
4258 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4259 SIGNAL is nonzero, give it that signal. */
4260
4261 static void
4262 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4263 int step, int signal, siginfo_t *info)
4264 {
4265 struct thread_info *thread = get_lwp_thread (lwp);
4266 struct thread_info *saved_thread;
4267 int ptrace_request;
4268 struct process_info *proc = get_thread_process (thread);
4269
4270 /* Note that target description may not be initialised
4271 (proc->tdesc == NULL) at this point because the program hasn't
4272 stopped at the first instruction yet. It means GDBserver skips
4273 the extra traps from the wrapper program (see option --wrapper).
4274 Code in this function that requires register access should be
4275 guarded by proc->tdesc == NULL or something else. */
4276
4277 if (lwp->stopped == 0)
4278 return;
4279
4280 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4281
4282 fast_tpoint_collect_result fast_tp_collecting
4283 = lwp->collecting_fast_tracepoint;
4284
4285 gdb_assert (!stabilizing_threads
4286 || (fast_tp_collecting
4287 != fast_tpoint_collect_result::not_collecting));
4288
4289 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4290 user used the "jump" command, or "set $pc = foo"). */
4291 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4292 {
4293 /* Collecting 'while-stepping' actions doesn't make sense
4294 anymore. */
4295 release_while_stepping_state_list (thread);
4296 }
4297
4298 /* If we have pending signals or status, and a new signal, enqueue the
4299 signal. Also enqueue the signal if it can't be delivered to the
4300 inferior right now. */
4301 if (signal != 0
4302 && (lwp->status_pending_p
4303 || lwp->pending_signals != NULL
4304 || !lwp_signal_can_be_delivered (lwp)))
4305 {
4306 enqueue_pending_signal (lwp, signal, info);
4307
4308 /* Postpone any pending signal. It was enqueued above. */
4309 signal = 0;
4310 }
4311
4312 if (lwp->status_pending_p)
4313 {
4314 if (debug_threads)
4315 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4316 " has pending status\n",
4317 lwpid_of (thread), step ? "step" : "continue",
4318 lwp->stop_expected ? "expected" : "not expected");
4319 return;
4320 }
4321
4322 saved_thread = current_thread;
4323 current_thread = thread;
4324
4325 /* This bit needs some thinking about. If we get a signal that
4326 we must report while a single-step reinsert is still pending,
4327 we often end up resuming the thread. It might be better to
4328 (ew) allow a stack of pending events; then we could be sure that
4329 the reinsert happened right away and not lose any signals.
4330
4331 Making this stack would also shrink the window in which breakpoints are
4332 uninserted (see comment in linux_wait_for_lwp) but not enough for
4333 complete correctness, so it won't solve that problem. It may be
4334 worthwhile just to solve this one, however. */
4335 if (lwp->bp_reinsert != 0)
4336 {
4337 if (debug_threads)
4338 debug_printf (" pending reinsert at 0x%s\n",
4339 paddress (lwp->bp_reinsert));
4340
4341 if (can_hardware_single_step ())
4342 {
4343 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4344 {
4345 if (step == 0)
4346 warning ("BAD - reinserting but not stepping.");
4347 if (lwp->suspended)
4348 warning ("BAD - reinserting and suspended(%d).",
4349 lwp->suspended);
4350 }
4351 }
4352
4353 step = maybe_hw_step (thread);
4354 }
4355
4356 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4357 {
4358 if (debug_threads)
4359 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4360 " (exit-jump-pad-bkpt)\n",
4361 lwpid_of (thread));
4362 }
4363 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4364 {
4365 if (debug_threads)
4366 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4367 " single-stepping\n",
4368 lwpid_of (thread));
4369
4370 if (can_hardware_single_step ())
4371 step = 1;
4372 else
4373 {
4374 internal_error (__FILE__, __LINE__,
4375 "moving out of jump pad single-stepping"
4376 " not implemented on this target");
4377 }
4378 }
4379
4380 /* If we have while-stepping actions in this thread set it stepping.
4381 If we have a signal to deliver, it may or may not be set to
4382 SIG_IGN, we don't know. Assume so, and allow collecting
4383 while-stepping into a signal handler. A possible smart thing to
4384 do would be to set an internal breakpoint at the signal return
4385 address, continue, and carry on catching this while-stepping
4386 action only when that breakpoint is hit. A future
4387 enhancement. */
4388 if (thread->while_stepping != NULL)
4389 {
4390 if (debug_threads)
4391 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4392 lwpid_of (thread));
4393
4394 step = single_step (lwp);
4395 }
4396
4397 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4398 {
4399 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4400
4401 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4402
4403 if (debug_threads)
4404 {
4405 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4406 (long) lwp->stop_pc);
4407 }
4408 }
4409
4410 /* If we have pending signals, consume one if it can be delivered to
4411 the inferior. */
4412 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4413 {
4414 struct pending_signals **p_sig;
4415
4416 p_sig = &lwp->pending_signals;
4417 while ((*p_sig)->prev != NULL)
4418 p_sig = &(*p_sig)->prev;
4419
4420 signal = (*p_sig)->signal;
4421 if ((*p_sig)->info.si_signo != 0)
4422 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4423 &(*p_sig)->info);
4424
4425 free (*p_sig);
4426 *p_sig = NULL;
4427 }
4428
4429 if (debug_threads)
4430 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4431 lwpid_of (thread), step ? "step" : "continue", signal,
4432 lwp->stop_expected ? "expected" : "not expected");
4433
4434 if (the_low_target.prepare_to_resume != NULL)
4435 the_low_target.prepare_to_resume (lwp);
4436
4437 regcache_invalidate_thread (thread);
4438 errno = 0;
4439 lwp->stepping = step;
4440 if (step)
4441 ptrace_request = PTRACE_SINGLESTEP;
4442 else if (gdb_catching_syscalls_p (lwp))
4443 ptrace_request = PTRACE_SYSCALL;
4444 else
4445 ptrace_request = PTRACE_CONT;
4446 ptrace (ptrace_request,
4447 lwpid_of (thread),
4448 (PTRACE_TYPE_ARG3) 0,
4449 /* Coerce to a uintptr_t first to avoid potential gcc warning
4450 of coercing an 8 byte integer to a 4 byte pointer. */
4451 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4452
4453 current_thread = saved_thread;
4454 if (errno)
4455 perror_with_name ("resuming thread");
4456
4457 /* Successfully resumed. Clear state that no longer makes sense,
4458 and mark the LWP as running. Must not do this before resuming
4459 otherwise if that fails other code will be confused. E.g., we'd
4460 later try to stop the LWP and hang forever waiting for a stop
4461 status. Note that we must not throw after this is cleared,
4462 otherwise handle_zombie_lwp_error would get confused. */
4463 lwp->stopped = 0;
4464 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4465 }
4466
4467 /* Called when we try to resume a stopped LWP and that errors out. If
4468 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4469 or about to become), discard the error, clear any pending status
4470 the LWP may have, and return true (we'll collect the exit status
4471 soon enough). Otherwise, return false. */
4472
4473 static int
4474 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4475 {
4476 struct thread_info *thread = get_lwp_thread (lp);
4477
4478 /* If we get an error after resuming the LWP successfully, we'd
4479 confuse !T state for the LWP being gone. */
4480 gdb_assert (lp->stopped);
4481
4482 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4483 because even if ptrace failed with ESRCH, the tracee may be "not
4484 yet fully dead", but already refusing ptrace requests. In that
4485 case the tracee has 'R (Running)' state for a little bit
4486 (observed in Linux 3.18). See also the note on ESRCH in the
4487 ptrace(2) man page. Instead, check whether the LWP has any state
4488 other than ptrace-stopped. */
4489
4490 /* Don't assume anything if /proc/PID/status can't be read. */
4491 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4492 {
4493 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4494 lp->status_pending_p = 0;
4495 return 1;
4496 }
4497 return 0;
4498 }
4499
4500 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4501 disappears while we try to resume it. */
4502
4503 static void
4504 linux_resume_one_lwp (struct lwp_info *lwp,
4505 int step, int signal, siginfo_t *info)
4506 {
4507 TRY
4508 {
4509 linux_resume_one_lwp_throw (lwp, step, signal, info);
4510 }
4511 CATCH (ex, RETURN_MASK_ERROR)
4512 {
4513 if (!check_ptrace_stopped_lwp_gone (lwp))
4514 throw_exception (ex);
4515 }
4516 END_CATCH
4517 }
4518
4519 /* This function is called once per thread via for_each_thread.
4520 We look up which resume request applies to THREAD and mark it with a
4521 pointer to the appropriate resume request.
4522
4523 This algorithm is O(threads * resume elements), but resume elements
4524 is small (and will remain small at least until GDB supports thread
4525 suspension). */
4526
4527 static void
4528 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4529 {
4530 struct lwp_info *lwp = get_thread_lwp (thread);
4531
4532 for (int ndx = 0; ndx < n; ndx++)
4533 {
4534 ptid_t ptid = resume[ndx].thread;
4535 if (ptid == minus_one_ptid
4536 || ptid == thread->id
4537 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4538 of PID'. */
4539 || (ptid.pid () == pid_of (thread)
4540 && (ptid.is_pid ()
4541 || ptid.lwp () == -1)))
4542 {
4543 if (resume[ndx].kind == resume_stop
4544 && thread->last_resume_kind == resume_stop)
4545 {
4546 if (debug_threads)
4547 debug_printf ("already %s LWP %ld at GDB's request\n",
4548 (thread->last_status.kind
4549 == TARGET_WAITKIND_STOPPED)
4550 ? "stopped"
4551 : "stopping",
4552 lwpid_of (thread));
4553
4554 continue;
4555 }
4556
4557 /* Ignore (wildcard) resume requests for already-resumed
4558 threads. */
4559 if (resume[ndx].kind != resume_stop
4560 && thread->last_resume_kind != resume_stop)
4561 {
4562 if (debug_threads)
4563 debug_printf ("already %s LWP %ld at GDB's request\n",
4564 (thread->last_resume_kind
4565 == resume_step)
4566 ? "stepping"
4567 : "continuing",
4568 lwpid_of (thread));
4569 continue;
4570 }
4571
4572 /* Don't let wildcard resumes resume fork children that GDB
4573 does not yet know are new fork children. */
4574 if (lwp->fork_relative != NULL)
4575 {
4576 struct lwp_info *rel = lwp->fork_relative;
4577
4578 if (rel->status_pending_p
4579 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4580 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4581 {
4582 if (debug_threads)
4583 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4584 lwpid_of (thread));
4585 continue;
4586 }
4587 }
4588
4589 /* If the thread has a pending event that has already been
4590 reported to GDBserver core, but GDB has not pulled the
4591 event out of the vStopped queue yet, likewise, ignore the
4592 (wildcard) resume request. */
4593 if (in_queued_stop_replies (thread->id))
4594 {
4595 if (debug_threads)
4596 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4597 lwpid_of (thread));
4598 continue;
4599 }
4600
4601 lwp->resume = &resume[ndx];
4602 thread->last_resume_kind = lwp->resume->kind;
4603
4604 lwp->step_range_start = lwp->resume->step_range_start;
4605 lwp->step_range_end = lwp->resume->step_range_end;
4606
4607 /* If we had a deferred signal to report, dequeue one now.
4608 This can happen if LWP gets more than one signal while
4609 trying to get out of a jump pad. */
4610 if (lwp->stopped
4611 && !lwp->status_pending_p
4612 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4613 {
4614 lwp->status_pending_p = 1;
4615
4616 if (debug_threads)
4617 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4618 "leaving status pending.\n",
4619 WSTOPSIG (lwp->status_pending),
4620 lwpid_of (thread));
4621 }
4622
4623 return;
4624 }
4625 }
4626
4627 /* No resume action for this thread. */
4628 lwp->resume = NULL;
4629 }
4630
4631 /* find_thread callback for linux_resume. Return true if this lwp has an
4632 interesting status pending. */
4633
4634 static bool
4635 resume_status_pending_p (thread_info *thread)
4636 {
4637 struct lwp_info *lwp = get_thread_lwp (thread);
4638
4639 /* LWPs which will not be resumed are not interesting, because
4640 we might not wait for them next time through linux_wait. */
4641 if (lwp->resume == NULL)
4642 return false;
4643
4644 return thread_still_has_status_pending_p (thread);
4645 }
4646
4647 /* Return 1 if this lwp that GDB wants running is stopped at an
4648 internal breakpoint that we need to step over. It assumes that any
4649 required STOP_PC adjustment has already been propagated to the
4650 inferior's regcache. */
4651
4652 static bool
4653 need_step_over_p (thread_info *thread)
4654 {
4655 struct lwp_info *lwp = get_thread_lwp (thread);
4656 struct thread_info *saved_thread;
4657 CORE_ADDR pc;
4658 struct process_info *proc = get_thread_process (thread);
4659
4660 /* GDBserver is skipping the extra traps from the wrapper program,
4661 don't have to do step over. */
4662 if (proc->tdesc == NULL)
4663 return false;
4664
4665 /* LWPs which will not be resumed are not interesting, because we
4666 might not wait for them next time through linux_wait. */
4667
4668 if (!lwp->stopped)
4669 {
4670 if (debug_threads)
4671 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4672 lwpid_of (thread));
4673 return false;
4674 }
4675
4676 if (thread->last_resume_kind == resume_stop)
4677 {
4678 if (debug_threads)
4679 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4680 " stopped\n",
4681 lwpid_of (thread));
4682 return false;
4683 }
4684
4685 gdb_assert (lwp->suspended >= 0);
4686
4687 if (lwp->suspended)
4688 {
4689 if (debug_threads)
4690 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4691 lwpid_of (thread));
4692 return false;
4693 }
4694
4695 if (lwp->status_pending_p)
4696 {
4697 if (debug_threads)
4698 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4699 " status.\n",
4700 lwpid_of (thread));
4701 return false;
4702 }
4703
4704 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4705 or we have. */
4706 pc = get_pc (lwp);
4707
4708 /* If the PC has changed since we stopped, then don't do anything,
4709 and let the breakpoint/tracepoint be hit. This happens if, for
4710 instance, GDB handled the decr_pc_after_break subtraction itself,
4711 GDB is OOL stepping this thread, or the user has issued a "jump"
4712 command, or poked thread's registers herself. */
4713 if (pc != lwp->stop_pc)
4714 {
4715 if (debug_threads)
4716 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4717 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4718 lwpid_of (thread),
4719 paddress (lwp->stop_pc), paddress (pc));
4720 return false;
4721 }
4722
4723 /* On software single step target, resume the inferior with signal
4724 rather than stepping over. */
4725 if (can_software_single_step ()
4726 && lwp->pending_signals != NULL
4727 && lwp_signal_can_be_delivered (lwp))
4728 {
4729 if (debug_threads)
4730 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4731 " signals.\n",
4732 lwpid_of (thread));
4733
4734 return false;
4735 }
4736
4737 saved_thread = current_thread;
4738 current_thread = thread;
4739
4740 /* We can only step over breakpoints we know about. */
4741 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4742 {
4743 /* Don't step over a breakpoint that GDB expects to hit
4744 though. If the condition is being evaluated on the target's side
4745 and it evaluate to false, step over this breakpoint as well. */
4746 if (gdb_breakpoint_here (pc)
4747 && gdb_condition_true_at_breakpoint (pc)
4748 && gdb_no_commands_at_breakpoint (pc))
4749 {
4750 if (debug_threads)
4751 debug_printf ("Need step over [LWP %ld]? yes, but found"
4752 " GDB breakpoint at 0x%s; skipping step over\n",
4753 lwpid_of (thread), paddress (pc));
4754
4755 current_thread = saved_thread;
4756 return false;
4757 }
4758 else
4759 {
4760 if (debug_threads)
4761 debug_printf ("Need step over [LWP %ld]? yes, "
4762 "found breakpoint at 0x%s\n",
4763 lwpid_of (thread), paddress (pc));
4764
4765 /* We've found an lwp that needs stepping over --- return 1 so
4766 that find_thread stops looking. */
4767 current_thread = saved_thread;
4768
4769 return true;
4770 }
4771 }
4772
4773 current_thread = saved_thread;
4774
4775 if (debug_threads)
4776 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4777 " at 0x%s\n",
4778 lwpid_of (thread), paddress (pc));
4779
4780 return false;
4781 }
4782
4783 /* Start a step-over operation on LWP. When LWP stopped at a
4784 breakpoint, to make progress, we need to remove the breakpoint out
4785 of the way. If we let other threads run while we do that, they may
4786 pass by the breakpoint location and miss hitting it. To avoid
4787 that, a step-over momentarily stops all threads while LWP is
4788 single-stepped by either hardware or software while the breakpoint
4789 is temporarily uninserted from the inferior. When the single-step
4790 finishes, we reinsert the breakpoint, and let all threads that are
4791 supposed to be running, run again. */
4792
4793 static int
4794 start_step_over (struct lwp_info *lwp)
4795 {
4796 struct thread_info *thread = get_lwp_thread (lwp);
4797 struct thread_info *saved_thread;
4798 CORE_ADDR pc;
4799 int step;
4800
4801 if (debug_threads)
4802 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4803 lwpid_of (thread));
4804
4805 stop_all_lwps (1, lwp);
4806
4807 if (lwp->suspended != 0)
4808 {
4809 internal_error (__FILE__, __LINE__,
4810 "LWP %ld suspended=%d\n", lwpid_of (thread),
4811 lwp->suspended);
4812 }
4813
4814 if (debug_threads)
4815 debug_printf ("Done stopping all threads for step-over.\n");
4816
4817 /* Note, we should always reach here with an already adjusted PC,
4818 either by GDB (if we're resuming due to GDB's request), or by our
4819 caller, if we just finished handling an internal breakpoint GDB
4820 shouldn't care about. */
4821 pc = get_pc (lwp);
4822
4823 saved_thread = current_thread;
4824 current_thread = thread;
4825
4826 lwp->bp_reinsert = pc;
4827 uninsert_breakpoints_at (pc);
4828 uninsert_fast_tracepoint_jumps_at (pc);
4829
4830 step = single_step (lwp);
4831
4832 current_thread = saved_thread;
4833
4834 linux_resume_one_lwp (lwp, step, 0, NULL);
4835
4836 /* Require next event from this LWP. */
4837 step_over_bkpt = thread->id;
4838 return 1;
4839 }
4840
4841 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4842 start_step_over, if still there, and delete any single-step
4843 breakpoints we've set, on non hardware single-step targets. */
4844
4845 static int
4846 finish_step_over (struct lwp_info *lwp)
4847 {
4848 if (lwp->bp_reinsert != 0)
4849 {
4850 struct thread_info *saved_thread = current_thread;
4851
4852 if (debug_threads)
4853 debug_printf ("Finished step over.\n");
4854
4855 current_thread = get_lwp_thread (lwp);
4856
4857 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4858 may be no breakpoint to reinsert there by now. */
4859 reinsert_breakpoints_at (lwp->bp_reinsert);
4860 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4861
4862 lwp->bp_reinsert = 0;
4863
4864 /* Delete any single-step breakpoints. No longer needed. We
4865 don't have to worry about other threads hitting this trap,
4866 and later not being able to explain it, because we were
4867 stepping over a breakpoint, and we hold all threads but
4868 LWP stopped while doing that. */
4869 if (!can_hardware_single_step ())
4870 {
4871 gdb_assert (has_single_step_breakpoints (current_thread));
4872 delete_single_step_breakpoints (current_thread);
4873 }
4874
4875 step_over_bkpt = null_ptid;
4876 current_thread = saved_thread;
4877 return 1;
4878 }
4879 else
4880 return 0;
4881 }
4882
4883 /* If there's a step over in progress, wait until all threads stop
4884 (that is, until the stepping thread finishes its step), and
4885 unsuspend all lwps. The stepping thread ends with its status
4886 pending, which is processed later when we get back to processing
4887 events. */
4888
4889 static void
4890 complete_ongoing_step_over (void)
4891 {
4892 if (step_over_bkpt != null_ptid)
4893 {
4894 struct lwp_info *lwp;
4895 int wstat;
4896 int ret;
4897
4898 if (debug_threads)
4899 debug_printf ("detach: step over in progress, finish it first\n");
4900
4901 /* Passing NULL_PTID as filter indicates we want all events to
4902 be left pending. Eventually this returns when there are no
4903 unwaited-for children left. */
4904 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4905 &wstat, __WALL);
4906 gdb_assert (ret == -1);
4907
4908 lwp = find_lwp_pid (step_over_bkpt);
4909 if (lwp != NULL)
4910 finish_step_over (lwp);
4911 step_over_bkpt = null_ptid;
4912 unsuspend_all_lwps (lwp);
4913 }
4914 }
4915
4916 /* This function is called once per thread. We check the thread's resume
4917 request, which will tell us whether to resume, step, or leave the thread
4918 stopped; and what signal, if any, it should be sent.
4919
4920 For threads which we aren't explicitly told otherwise, we preserve
4921 the stepping flag; this is used for stepping over gdbserver-placed
4922 breakpoints.
4923
4924 If pending_flags was set in any thread, we queue any needed
4925 signals, since we won't actually resume. We already have a pending
4926 event to report, so we don't need to preserve any step requests;
4927 they should be re-issued if necessary. */
4928
4929 static void
4930 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4931 {
4932 struct lwp_info *lwp = get_thread_lwp (thread);
4933 int leave_pending;
4934
4935 if (lwp->resume == NULL)
4936 return;
4937
4938 if (lwp->resume->kind == resume_stop)
4939 {
4940 if (debug_threads)
4941 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4942
4943 if (!lwp->stopped)
4944 {
4945 if (debug_threads)
4946 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4947
4948 /* Stop the thread, and wait for the event asynchronously,
4949 through the event loop. */
4950 send_sigstop (lwp);
4951 }
4952 else
4953 {
4954 if (debug_threads)
4955 debug_printf ("already stopped LWP %ld\n",
4956 lwpid_of (thread));
4957
4958 /* The LWP may have been stopped in an internal event that
4959 was not meant to be notified back to GDB (e.g., gdbserver
4960 breakpoint), so we should be reporting a stop event in
4961 this case too. */
4962
4963 /* If the thread already has a pending SIGSTOP, this is a
4964 no-op. Otherwise, something later will presumably resume
4965 the thread and this will cause it to cancel any pending
4966 operation, due to last_resume_kind == resume_stop. If
4967 the thread already has a pending status to report, we
4968 will still report it the next time we wait - see
4969 status_pending_p_callback. */
4970
4971 /* If we already have a pending signal to report, then
4972 there's no need to queue a SIGSTOP, as this means we're
4973 midway through moving the LWP out of the jumppad, and we
4974 will report the pending signal as soon as that is
4975 finished. */
4976 if (lwp->pending_signals_to_report == NULL)
4977 send_sigstop (lwp);
4978 }
4979
4980 /* For stop requests, we're done. */
4981 lwp->resume = NULL;
4982 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4983 return;
4984 }
4985
4986 /* If this thread which is about to be resumed has a pending status,
4987 then don't resume it - we can just report the pending status.
4988 Likewise if it is suspended, because e.g., another thread is
4989 stepping past a breakpoint. Make sure to queue any signals that
4990 would otherwise be sent. In all-stop mode, we do this decision
4991 based on if *any* thread has a pending status. If there's a
4992 thread that needs the step-over-breakpoint dance, then don't
4993 resume any other thread but that particular one. */
4994 leave_pending = (lwp->suspended
4995 || lwp->status_pending_p
4996 || leave_all_stopped);
4997
4998 /* If we have a new signal, enqueue the signal. */
4999 if (lwp->resume->sig != 0)
5000 {
5001 siginfo_t info, *info_p;
5002
5003 /* If this is the same signal we were previously stopped by,
5004 make sure to queue its siginfo. */
5005 if (WIFSTOPPED (lwp->last_status)
5006 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5007 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5008 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5009 info_p = &info;
5010 else
5011 info_p = NULL;
5012
5013 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5014 }
5015
5016 if (!leave_pending)
5017 {
5018 if (debug_threads)
5019 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5020
5021 proceed_one_lwp (thread, NULL);
5022 }
5023 else
5024 {
5025 if (debug_threads)
5026 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5027 }
5028
5029 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5030 lwp->resume = NULL;
5031 }
5032
5033 static void
5034 linux_resume (struct thread_resume *resume_info, size_t n)
5035 {
5036 struct thread_info *need_step_over = NULL;
5037
5038 if (debug_threads)
5039 {
5040 debug_enter ();
5041 debug_printf ("linux_resume:\n");
5042 }
5043
5044 for_each_thread ([&] (thread_info *thread)
5045 {
5046 linux_set_resume_request (thread, resume_info, n);
5047 });
5048
5049 /* If there is a thread which would otherwise be resumed, which has
5050 a pending status, then don't resume any threads - we can just
5051 report the pending status. Make sure to queue any signals that
5052 would otherwise be sent. In non-stop mode, we'll apply this
5053 logic to each thread individually. We consume all pending events
5054 before considering to start a step-over (in all-stop). */
5055 bool any_pending = false;
5056 if (!non_stop)
5057 any_pending = find_thread (resume_status_pending_p) != NULL;
5058
5059 /* If there is a thread which would otherwise be resumed, which is
5060 stopped at a breakpoint that needs stepping over, then don't
5061 resume any threads - have it step over the breakpoint with all
5062 other threads stopped, then resume all threads again. Make sure
5063 to queue any signals that would otherwise be delivered or
5064 queued. */
5065 if (!any_pending && supports_breakpoints ())
5066 need_step_over = find_thread (need_step_over_p);
5067
5068 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5069
5070 if (debug_threads)
5071 {
5072 if (need_step_over != NULL)
5073 debug_printf ("Not resuming all, need step over\n");
5074 else if (any_pending)
5075 debug_printf ("Not resuming, all-stop and found "
5076 "an LWP with pending status\n");
5077 else
5078 debug_printf ("Resuming, no pending status or step over needed\n");
5079 }
5080
5081 /* Even if we're leaving threads stopped, queue all signals we'd
5082 otherwise deliver. */
5083 for_each_thread ([&] (thread_info *thread)
5084 {
5085 linux_resume_one_thread (thread, leave_all_stopped);
5086 });
5087
5088 if (need_step_over)
5089 start_step_over (get_thread_lwp (need_step_over));
5090
5091 if (debug_threads)
5092 {
5093 debug_printf ("linux_resume done\n");
5094 debug_exit ();
5095 }
5096
5097 /* We may have events that were pending that can/should be sent to
5098 the client now. Trigger a linux_wait call. */
5099 if (target_is_async_p ())
5100 async_file_mark ();
5101 }
5102
5103 /* This function is called once per thread. We check the thread's
5104 last resume request, which will tell us whether to resume, step, or
5105 leave the thread stopped. Any signal the client requested to be
5106 delivered has already been enqueued at this point.
5107
5108 If any thread that GDB wants running is stopped at an internal
5109 breakpoint that needs stepping over, we start a step-over operation
5110 on that particular thread, and leave all others stopped. */
5111
5112 static void
5113 proceed_one_lwp (thread_info *thread, lwp_info *except)
5114 {
5115 struct lwp_info *lwp = get_thread_lwp (thread);
5116 int step;
5117
5118 if (lwp == except)
5119 return;
5120
5121 if (debug_threads)
5122 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5123
5124 if (!lwp->stopped)
5125 {
5126 if (debug_threads)
5127 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5128 return;
5129 }
5130
5131 if (thread->last_resume_kind == resume_stop
5132 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5133 {
5134 if (debug_threads)
5135 debug_printf (" client wants LWP to remain %ld stopped\n",
5136 lwpid_of (thread));
5137 return;
5138 }
5139
5140 if (lwp->status_pending_p)
5141 {
5142 if (debug_threads)
5143 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5144 lwpid_of (thread));
5145 return;
5146 }
5147
5148 gdb_assert (lwp->suspended >= 0);
5149
5150 if (lwp->suspended)
5151 {
5152 if (debug_threads)
5153 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5154 return;
5155 }
5156
5157 if (thread->last_resume_kind == resume_stop
5158 && lwp->pending_signals_to_report == NULL
5159 && (lwp->collecting_fast_tracepoint
5160 == fast_tpoint_collect_result::not_collecting))
5161 {
5162 /* We haven't reported this LWP as stopped yet (otherwise, the
5163 last_status.kind check above would catch it, and we wouldn't
5164 reach here. This LWP may have been momentarily paused by a
5165 stop_all_lwps call while handling for example, another LWP's
5166 step-over. In that case, the pending expected SIGSTOP signal
5167 that was queued at vCont;t handling time will have already
5168 been consumed by wait_for_sigstop, and so we need to requeue
5169 another one here. Note that if the LWP already has a SIGSTOP
5170 pending, this is a no-op. */
5171
5172 if (debug_threads)
5173 debug_printf ("Client wants LWP %ld to stop. "
5174 "Making sure it has a SIGSTOP pending\n",
5175 lwpid_of (thread));
5176
5177 send_sigstop (lwp);
5178 }
5179
5180 if (thread->last_resume_kind == resume_step)
5181 {
5182 if (debug_threads)
5183 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5184 lwpid_of (thread));
5185
5186 /* If resume_step is requested by GDB, install single-step
5187 breakpoints when the thread is about to be actually resumed if
5188 the single-step breakpoints weren't removed. */
5189 if (can_software_single_step ()
5190 && !has_single_step_breakpoints (thread))
5191 install_software_single_step_breakpoints (lwp);
5192
5193 step = maybe_hw_step (thread);
5194 }
5195 else if (lwp->bp_reinsert != 0)
5196 {
5197 if (debug_threads)
5198 debug_printf (" stepping LWP %ld, reinsert set\n",
5199 lwpid_of (thread));
5200
5201 step = maybe_hw_step (thread);
5202 }
5203 else
5204 step = 0;
5205
5206 linux_resume_one_lwp (lwp, step, 0, NULL);
5207 }
5208
5209 static void
5210 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5211 {
5212 struct lwp_info *lwp = get_thread_lwp (thread);
5213
5214 if (lwp == except)
5215 return;
5216
5217 lwp_suspended_decr (lwp);
5218
5219 proceed_one_lwp (thread, except);
5220 }
5221
5222 /* When we finish a step-over, set threads running again. If there's
5223 another thread that may need a step-over, now's the time to start
5224 it. Eventually, we'll move all threads past their breakpoints. */
5225
5226 static void
5227 proceed_all_lwps (void)
5228 {
5229 struct thread_info *need_step_over;
5230
5231 /* If there is a thread which would otherwise be resumed, which is
5232 stopped at a breakpoint that needs stepping over, then don't
5233 resume any threads - have it step over the breakpoint with all
5234 other threads stopped, then resume all threads again. */
5235
5236 if (supports_breakpoints ())
5237 {
5238 need_step_over = find_thread (need_step_over_p);
5239
5240 if (need_step_over != NULL)
5241 {
5242 if (debug_threads)
5243 debug_printf ("proceed_all_lwps: found "
5244 "thread %ld needing a step-over\n",
5245 lwpid_of (need_step_over));
5246
5247 start_step_over (get_thread_lwp (need_step_over));
5248 return;
5249 }
5250 }
5251
5252 if (debug_threads)
5253 debug_printf ("Proceeding, no step-over needed\n");
5254
5255 for_each_thread ([] (thread_info *thread)
5256 {
5257 proceed_one_lwp (thread, NULL);
5258 });
5259 }
5260
5261 /* Stopped LWPs that the client wanted to be running, that don't have
5262 pending statuses, are set to run again, except for EXCEPT, if not
5263 NULL. This undoes a stop_all_lwps call. */
5264
5265 static void
5266 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5267 {
5268 if (debug_threads)
5269 {
5270 debug_enter ();
5271 if (except)
5272 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5273 lwpid_of (get_lwp_thread (except)));
5274 else
5275 debug_printf ("unstopping all lwps\n");
5276 }
5277
5278 if (unsuspend)
5279 for_each_thread ([&] (thread_info *thread)
5280 {
5281 unsuspend_and_proceed_one_lwp (thread, except);
5282 });
5283 else
5284 for_each_thread ([&] (thread_info *thread)
5285 {
5286 proceed_one_lwp (thread, except);
5287 });
5288
5289 if (debug_threads)
5290 {
5291 debug_printf ("unstop_all_lwps done\n");
5292 debug_exit ();
5293 }
5294 }
5295
5296
5297 #ifdef HAVE_LINUX_REGSETS
5298
5299 #define use_linux_regsets 1
5300
5301 /* Returns true if REGSET has been disabled. */
5302
5303 static int
5304 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5305 {
5306 return (info->disabled_regsets != NULL
5307 && info->disabled_regsets[regset - info->regsets]);
5308 }
5309
5310 /* Disable REGSET. */
5311
5312 static void
5313 disable_regset (struct regsets_info *info, struct regset_info *regset)
5314 {
5315 int dr_offset;
5316
5317 dr_offset = regset - info->regsets;
5318 if (info->disabled_regsets == NULL)
5319 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5320 info->disabled_regsets[dr_offset] = 1;
5321 }
5322
5323 static int
5324 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5325 struct regcache *regcache)
5326 {
5327 struct regset_info *regset;
5328 int saw_general_regs = 0;
5329 int pid;
5330 struct iovec iov;
5331
5332 pid = lwpid_of (current_thread);
5333 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5334 {
5335 void *buf, *data;
5336 int nt_type, res;
5337
5338 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5339 continue;
5340
5341 buf = xmalloc (regset->size);
5342
5343 nt_type = regset->nt_type;
5344 if (nt_type)
5345 {
5346 iov.iov_base = buf;
5347 iov.iov_len = regset->size;
5348 data = (void *) &iov;
5349 }
5350 else
5351 data = buf;
5352
5353 #ifndef __sparc__
5354 res = ptrace (regset->get_request, pid,
5355 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5356 #else
5357 res = ptrace (regset->get_request, pid, data, nt_type);
5358 #endif
5359 if (res < 0)
5360 {
5361 if (errno == EIO
5362 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5363 {
5364 /* If we get EIO on a regset, or an EINVAL and the regset is
5365 optional, do not try it again for this process mode. */
5366 disable_regset (regsets_info, regset);
5367 }
5368 else if (errno == ENODATA)
5369 {
5370 /* ENODATA may be returned if the regset is currently
5371 not "active". This can happen in normal operation,
5372 so suppress the warning in this case. */
5373 }
5374 else if (errno == ESRCH)
5375 {
5376 /* At this point, ESRCH should mean the process is
5377 already gone, in which case we simply ignore attempts
5378 to read its registers. */
5379 }
5380 else
5381 {
5382 char s[256];
5383 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5384 pid);
5385 perror (s);
5386 }
5387 }
5388 else
5389 {
5390 if (regset->type == GENERAL_REGS)
5391 saw_general_regs = 1;
5392 regset->store_function (regcache, buf);
5393 }
5394 free (buf);
5395 }
5396 if (saw_general_regs)
5397 return 0;
5398 else
5399 return 1;
5400 }
5401
5402 static int
5403 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5404 struct regcache *regcache)
5405 {
5406 struct regset_info *regset;
5407 int saw_general_regs = 0;
5408 int pid;
5409 struct iovec iov;
5410
5411 pid = lwpid_of (current_thread);
5412 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5413 {
5414 void *buf, *data;
5415 int nt_type, res;
5416
5417 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5418 || regset->fill_function == NULL)
5419 continue;
5420
5421 buf = xmalloc (regset->size);
5422
5423 /* First fill the buffer with the current register set contents,
5424 in case there are any items in the kernel's regset that are
5425 not in gdbserver's regcache. */
5426
5427 nt_type = regset->nt_type;
5428 if (nt_type)
5429 {
5430 iov.iov_base = buf;
5431 iov.iov_len = regset->size;
5432 data = (void *) &iov;
5433 }
5434 else
5435 data = buf;
5436
5437 #ifndef __sparc__
5438 res = ptrace (regset->get_request, pid,
5439 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5440 #else
5441 res = ptrace (regset->get_request, pid, data, nt_type);
5442 #endif
5443
5444 if (res == 0)
5445 {
5446 /* Then overlay our cached registers on that. */
5447 regset->fill_function (regcache, buf);
5448
5449 /* Only now do we write the register set. */
5450 #ifndef __sparc__
5451 res = ptrace (regset->set_request, pid,
5452 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5453 #else
5454 res = ptrace (regset->set_request, pid, data, nt_type);
5455 #endif
5456 }
5457
5458 if (res < 0)
5459 {
5460 if (errno == EIO
5461 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5462 {
5463 /* If we get EIO on a regset, or an EINVAL and the regset is
5464 optional, do not try it again for this process mode. */
5465 disable_regset (regsets_info, regset);
5466 }
5467 else if (errno == ESRCH)
5468 {
5469 /* At this point, ESRCH should mean the process is
5470 already gone, in which case we simply ignore attempts
5471 to change its registers. See also the related
5472 comment in linux_resume_one_lwp. */
5473 free (buf);
5474 return 0;
5475 }
5476 else
5477 {
5478 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5479 }
5480 }
5481 else if (regset->type == GENERAL_REGS)
5482 saw_general_regs = 1;
5483 free (buf);
5484 }
5485 if (saw_general_regs)
5486 return 0;
5487 else
5488 return 1;
5489 }
5490
5491 #else /* !HAVE_LINUX_REGSETS */
5492
5493 #define use_linux_regsets 0
5494 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5495 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5496
5497 #endif
5498
5499 /* Return 1 if register REGNO is supported by one of the regset ptrace
5500 calls or 0 if it has to be transferred individually. */
5501
5502 static int
5503 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5504 {
5505 unsigned char mask = 1 << (regno % 8);
5506 size_t index = regno / 8;
5507
5508 return (use_linux_regsets
5509 && (regs_info->regset_bitmap == NULL
5510 || (regs_info->regset_bitmap[index] & mask) != 0));
5511 }
5512
5513 #ifdef HAVE_LINUX_USRREGS
5514
5515 static int
5516 register_addr (const struct usrregs_info *usrregs, int regnum)
5517 {
5518 int addr;
5519
5520 if (regnum < 0 || regnum >= usrregs->num_regs)
5521 error ("Invalid register number %d.", regnum);
5522
5523 addr = usrregs->regmap[regnum];
5524
5525 return addr;
5526 }
5527
5528 /* Fetch one register. */
5529 static void
5530 fetch_register (const struct usrregs_info *usrregs,
5531 struct regcache *regcache, int regno)
5532 {
5533 CORE_ADDR regaddr;
5534 int i, size;
5535 char *buf;
5536 int pid;
5537
5538 if (regno >= usrregs->num_regs)
5539 return;
5540 if ((*the_low_target.cannot_fetch_register) (regno))
5541 return;
5542
5543 regaddr = register_addr (usrregs, regno);
5544 if (regaddr == -1)
5545 return;
5546
5547 size = ((register_size (regcache->tdesc, regno)
5548 + sizeof (PTRACE_XFER_TYPE) - 1)
5549 & -sizeof (PTRACE_XFER_TYPE));
5550 buf = (char *) alloca (size);
5551
5552 pid = lwpid_of (current_thread);
5553 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5554 {
5555 errno = 0;
5556 *(PTRACE_XFER_TYPE *) (buf + i) =
5557 ptrace (PTRACE_PEEKUSER, pid,
5558 /* Coerce to a uintptr_t first to avoid potential gcc warning
5559 of coercing an 8 byte integer to a 4 byte pointer. */
5560 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5561 regaddr += sizeof (PTRACE_XFER_TYPE);
5562 if (errno != 0)
5563 {
5564 /* Mark register REGNO unavailable. */
5565 supply_register (regcache, regno, NULL);
5566 return;
5567 }
5568 }
5569
5570 if (the_low_target.supply_ptrace_register)
5571 the_low_target.supply_ptrace_register (regcache, regno, buf);
5572 else
5573 supply_register (regcache, regno, buf);
5574 }
5575
5576 /* Store one register. */
5577 static void
5578 store_register (const struct usrregs_info *usrregs,
5579 struct regcache *regcache, int regno)
5580 {
5581 CORE_ADDR regaddr;
5582 int i, size;
5583 char *buf;
5584 int pid;
5585
5586 if (regno >= usrregs->num_regs)
5587 return;
5588 if ((*the_low_target.cannot_store_register) (regno))
5589 return;
5590
5591 regaddr = register_addr (usrregs, regno);
5592 if (regaddr == -1)
5593 return;
5594
5595 size = ((register_size (regcache->tdesc, regno)
5596 + sizeof (PTRACE_XFER_TYPE) - 1)
5597 & -sizeof (PTRACE_XFER_TYPE));
5598 buf = (char *) alloca (size);
5599 memset (buf, 0, size);
5600
5601 if (the_low_target.collect_ptrace_register)
5602 the_low_target.collect_ptrace_register (regcache, regno, buf);
5603 else
5604 collect_register (regcache, regno, buf);
5605
5606 pid = lwpid_of (current_thread);
5607 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5608 {
5609 errno = 0;
5610 ptrace (PTRACE_POKEUSER, pid,
5611 /* Coerce to a uintptr_t first to avoid potential gcc warning
5612 about coercing an 8 byte integer to a 4 byte pointer. */
5613 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5614 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5615 if (errno != 0)
5616 {
5617 /* At this point, ESRCH should mean the process is
5618 already gone, in which case we simply ignore attempts
5619 to change its registers. See also the related
5620 comment in linux_resume_one_lwp. */
5621 if (errno == ESRCH)
5622 return;
5623
5624 if ((*the_low_target.cannot_store_register) (regno) == 0)
5625 error ("writing register %d: %s", regno, strerror (errno));
5626 }
5627 regaddr += sizeof (PTRACE_XFER_TYPE);
5628 }
5629 }
5630
5631 /* Fetch all registers, or just one, from the child process.
5632 If REGNO is -1, do this for all registers, skipping any that are
5633 assumed to have been retrieved by regsets_fetch_inferior_registers,
5634 unless ALL is non-zero.
5635 Otherwise, REGNO specifies which register (so we can save time). */
5636 static void
5637 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5638 struct regcache *regcache, int regno, int all)
5639 {
5640 struct usrregs_info *usr = regs_info->usrregs;
5641
5642 if (regno == -1)
5643 {
5644 for (regno = 0; regno < usr->num_regs; regno++)
5645 if (all || !linux_register_in_regsets (regs_info, regno))
5646 fetch_register (usr, regcache, regno);
5647 }
5648 else
5649 fetch_register (usr, regcache, regno);
5650 }
5651
5652 /* Store our register values back into the inferior.
5653 If REGNO is -1, do this for all registers, skipping any that are
5654 assumed to have been saved by regsets_store_inferior_registers,
5655 unless ALL is non-zero.
5656 Otherwise, REGNO specifies which register (so we can save time). */
5657 static void
5658 usr_store_inferior_registers (const struct regs_info *regs_info,
5659 struct regcache *regcache, int regno, int all)
5660 {
5661 struct usrregs_info *usr = regs_info->usrregs;
5662
5663 if (regno == -1)
5664 {
5665 for (regno = 0; regno < usr->num_regs; regno++)
5666 if (all || !linux_register_in_regsets (regs_info, regno))
5667 store_register (usr, regcache, regno);
5668 }
5669 else
5670 store_register (usr, regcache, regno);
5671 }
5672
5673 #else /* !HAVE_LINUX_USRREGS */
5674
5675 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5676 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5677
5678 #endif
5679
5680
5681 static void
5682 linux_fetch_registers (struct regcache *regcache, int regno)
5683 {
5684 int use_regsets;
5685 int all = 0;
5686 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5687
5688 if (regno == -1)
5689 {
5690 if (the_low_target.fetch_register != NULL
5691 && regs_info->usrregs != NULL)
5692 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5693 (*the_low_target.fetch_register) (regcache, regno);
5694
5695 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5696 if (regs_info->usrregs != NULL)
5697 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5698 }
5699 else
5700 {
5701 if (the_low_target.fetch_register != NULL
5702 && (*the_low_target.fetch_register) (regcache, regno))
5703 return;
5704
5705 use_regsets = linux_register_in_regsets (regs_info, regno);
5706 if (use_regsets)
5707 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5708 regcache);
5709 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5710 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5711 }
5712 }
5713
5714 static void
5715 linux_store_registers (struct regcache *regcache, int regno)
5716 {
5717 int use_regsets;
5718 int all = 0;
5719 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5720
5721 if (regno == -1)
5722 {
5723 all = regsets_store_inferior_registers (regs_info->regsets_info,
5724 regcache);
5725 if (regs_info->usrregs != NULL)
5726 usr_store_inferior_registers (regs_info, regcache, regno, all);
5727 }
5728 else
5729 {
5730 use_regsets = linux_register_in_regsets (regs_info, regno);
5731 if (use_regsets)
5732 all = regsets_store_inferior_registers (regs_info->regsets_info,
5733 regcache);
5734 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5735 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5736 }
5737 }
5738
5739
5740 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5741 to debugger memory starting at MYADDR. */
5742
5743 static int
5744 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5745 {
5746 int pid = lwpid_of (current_thread);
5747 PTRACE_XFER_TYPE *buffer;
5748 CORE_ADDR addr;
5749 int count;
5750 char filename[64];
5751 int i;
5752 int ret;
5753 int fd;
5754
5755 /* Try using /proc. Don't bother for one word. */
5756 if (len >= 3 * sizeof (long))
5757 {
5758 int bytes;
5759
5760 /* We could keep this file open and cache it - possibly one per
5761 thread. That requires some juggling, but is even faster. */
5762 sprintf (filename, "/proc/%d/mem", pid);
5763 fd = open (filename, O_RDONLY | O_LARGEFILE);
5764 if (fd == -1)
5765 goto no_proc;
5766
5767 /* If pread64 is available, use it. It's faster if the kernel
5768 supports it (only one syscall), and it's 64-bit safe even on
5769 32-bit platforms (for instance, SPARC debugging a SPARC64
5770 application). */
5771 #ifdef HAVE_PREAD64
5772 bytes = pread64 (fd, myaddr, len, memaddr);
5773 #else
5774 bytes = -1;
5775 if (lseek (fd, memaddr, SEEK_SET) != -1)
5776 bytes = read (fd, myaddr, len);
5777 #endif
5778
5779 close (fd);
5780 if (bytes == len)
5781 return 0;
5782
5783 /* Some data was read, we'll try to get the rest with ptrace. */
5784 if (bytes > 0)
5785 {
5786 memaddr += bytes;
5787 myaddr += bytes;
5788 len -= bytes;
5789 }
5790 }
5791
5792 no_proc:
5793 /* Round starting address down to longword boundary. */
5794 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5795 /* Round ending address up; get number of longwords that makes. */
5796 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5797 / sizeof (PTRACE_XFER_TYPE));
5798 /* Allocate buffer of that many longwords. */
5799 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5800
5801 /* Read all the longwords */
5802 errno = 0;
5803 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5804 {
5805 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5806 about coercing an 8 byte integer to a 4 byte pointer. */
5807 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5808 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5809 (PTRACE_TYPE_ARG4) 0);
5810 if (errno)
5811 break;
5812 }
5813 ret = errno;
5814
5815 /* Copy appropriate bytes out of the buffer. */
5816 if (i > 0)
5817 {
5818 i *= sizeof (PTRACE_XFER_TYPE);
5819 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5820 memcpy (myaddr,
5821 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5822 i < len ? i : len);
5823 }
5824
5825 return ret;
5826 }
5827
5828 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5829 memory at MEMADDR. On failure (cannot write to the inferior)
5830 returns the value of errno. Always succeeds if LEN is zero. */
5831
5832 static int
5833 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5834 {
5835 int i;
5836 /* Round starting address down to longword boundary. */
5837 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5838 /* Round ending address up; get number of longwords that makes. */
5839 int count
5840 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5841 / sizeof (PTRACE_XFER_TYPE);
5842
5843 /* Allocate buffer of that many longwords. */
5844 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5845
5846 int pid = lwpid_of (current_thread);
5847
5848 if (len == 0)
5849 {
5850 /* Zero length write always succeeds. */
5851 return 0;
5852 }
5853
5854 if (debug_threads)
5855 {
5856 /* Dump up to four bytes. */
5857 char str[4 * 2 + 1];
5858 char *p = str;
5859 int dump = len < 4 ? len : 4;
5860
5861 for (i = 0; i < dump; i++)
5862 {
5863 sprintf (p, "%02x", myaddr[i]);
5864 p += 2;
5865 }
5866 *p = '\0';
5867
5868 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5869 str, (long) memaddr, pid);
5870 }
5871
5872 /* Fill start and end extra bytes of buffer with existing memory data. */
5873
5874 errno = 0;
5875 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5876 about coercing an 8 byte integer to a 4 byte pointer. */
5877 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5878 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5879 (PTRACE_TYPE_ARG4) 0);
5880 if (errno)
5881 return errno;
5882
5883 if (count > 1)
5884 {
5885 errno = 0;
5886 buffer[count - 1]
5887 = ptrace (PTRACE_PEEKTEXT, pid,
5888 /* Coerce to a uintptr_t first to avoid potential gcc warning
5889 about coercing an 8 byte integer to a 4 byte pointer. */
5890 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5891 * sizeof (PTRACE_XFER_TYPE)),
5892 (PTRACE_TYPE_ARG4) 0);
5893 if (errno)
5894 return errno;
5895 }
5896
5897 /* Copy data to be written over corresponding part of buffer. */
5898
5899 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5900 myaddr, len);
5901
5902 /* Write the entire buffer. */
5903
5904 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5905 {
5906 errno = 0;
5907 ptrace (PTRACE_POKETEXT, pid,
5908 /* Coerce to a uintptr_t first to avoid potential gcc warning
5909 about coercing an 8 byte integer to a 4 byte pointer. */
5910 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5911 (PTRACE_TYPE_ARG4) buffer[i]);
5912 if (errno)
5913 return errno;
5914 }
5915
5916 return 0;
5917 }
5918
5919 static void
5920 linux_look_up_symbols (void)
5921 {
5922 #ifdef USE_THREAD_DB
5923 struct process_info *proc = current_process ();
5924
5925 if (proc->priv->thread_db != NULL)
5926 return;
5927
5928 thread_db_init ();
5929 #endif
5930 }
5931
5932 static void
5933 linux_request_interrupt (void)
5934 {
5935 /* Send a SIGINT to the process group. This acts just like the user
5936 typed a ^C on the controlling terminal. */
5937 kill (-signal_pid, SIGINT);
5938 }
5939
5940 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5941 to debugger memory starting at MYADDR. */
5942
5943 static int
5944 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5945 {
5946 char filename[PATH_MAX];
5947 int fd, n;
5948 int pid = lwpid_of (current_thread);
5949
5950 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5951
5952 fd = open (filename, O_RDONLY);
5953 if (fd < 0)
5954 return -1;
5955
5956 if (offset != (CORE_ADDR) 0
5957 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5958 n = -1;
5959 else
5960 n = read (fd, myaddr, len);
5961
5962 close (fd);
5963
5964 return n;
5965 }
5966
5967 /* These breakpoint and watchpoint related wrapper functions simply
5968 pass on the function call if the target has registered a
5969 corresponding function. */
5970
5971 static int
5972 linux_supports_z_point_type (char z_type)
5973 {
5974 return (the_low_target.supports_z_point_type != NULL
5975 && the_low_target.supports_z_point_type (z_type));
5976 }
5977
5978 static int
5979 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5980 int size, struct raw_breakpoint *bp)
5981 {
5982 if (type == raw_bkpt_type_sw)
5983 return insert_memory_breakpoint (bp);
5984 else if (the_low_target.insert_point != NULL)
5985 return the_low_target.insert_point (type, addr, size, bp);
5986 else
5987 /* Unsupported (see target.h). */
5988 return 1;
5989 }
5990
5991 static int
5992 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5993 int size, struct raw_breakpoint *bp)
5994 {
5995 if (type == raw_bkpt_type_sw)
5996 return remove_memory_breakpoint (bp);
5997 else if (the_low_target.remove_point != NULL)
5998 return the_low_target.remove_point (type, addr, size, bp);
5999 else
6000 /* Unsupported (see target.h). */
6001 return 1;
6002 }
6003
6004 /* Implement the to_stopped_by_sw_breakpoint target_ops
6005 method. */
6006
6007 static int
6008 linux_stopped_by_sw_breakpoint (void)
6009 {
6010 struct lwp_info *lwp = get_thread_lwp (current_thread);
6011
6012 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6013 }
6014
6015 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6016 method. */
6017
6018 static int
6019 linux_supports_stopped_by_sw_breakpoint (void)
6020 {
6021 return USE_SIGTRAP_SIGINFO;
6022 }
6023
6024 /* Implement the to_stopped_by_hw_breakpoint target_ops
6025 method. */
6026
6027 static int
6028 linux_stopped_by_hw_breakpoint (void)
6029 {
6030 struct lwp_info *lwp = get_thread_lwp (current_thread);
6031
6032 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6033 }
6034
6035 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6036 method. */
6037
6038 static int
6039 linux_supports_stopped_by_hw_breakpoint (void)
6040 {
6041 return USE_SIGTRAP_SIGINFO;
6042 }
6043
6044 /* Implement the supports_hardware_single_step target_ops method. */
6045
6046 static int
6047 linux_supports_hardware_single_step (void)
6048 {
6049 return can_hardware_single_step ();
6050 }
6051
6052 static int
6053 linux_supports_software_single_step (void)
6054 {
6055 return can_software_single_step ();
6056 }
6057
6058 static int
6059 linux_stopped_by_watchpoint (void)
6060 {
6061 struct lwp_info *lwp = get_thread_lwp (current_thread);
6062
6063 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6064 }
6065
6066 static CORE_ADDR
6067 linux_stopped_data_address (void)
6068 {
6069 struct lwp_info *lwp = get_thread_lwp (current_thread);
6070
6071 return lwp->stopped_data_address;
6072 }
6073
6074 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6075 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6076 && defined(PT_TEXT_END_ADDR)
6077
6078 /* This is only used for targets that define PT_TEXT_ADDR,
6079 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6080 the target has different ways of acquiring this information, like
6081 loadmaps. */
6082
6083 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6084 to tell gdb about. */
6085
6086 static int
6087 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6088 {
6089 unsigned long text, text_end, data;
6090 int pid = lwpid_of (current_thread);
6091
6092 errno = 0;
6093
6094 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6095 (PTRACE_TYPE_ARG4) 0);
6096 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6097 (PTRACE_TYPE_ARG4) 0);
6098 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6099 (PTRACE_TYPE_ARG4) 0);
6100
6101 if (errno == 0)
6102 {
6103 /* Both text and data offsets produced at compile-time (and so
6104 used by gdb) are relative to the beginning of the program,
6105 with the data segment immediately following the text segment.
6106 However, the actual runtime layout in memory may put the data
6107 somewhere else, so when we send gdb a data base-address, we
6108 use the real data base address and subtract the compile-time
6109 data base-address from it (which is just the length of the
6110 text segment). BSS immediately follows data in both
6111 cases. */
6112 *text_p = text;
6113 *data_p = data - (text_end - text);
6114
6115 return 1;
6116 }
6117 return 0;
6118 }
6119 #endif
6120
6121 static int
6122 linux_qxfer_osdata (const char *annex,
6123 unsigned char *readbuf, unsigned const char *writebuf,
6124 CORE_ADDR offset, int len)
6125 {
6126 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6127 }
6128
6129 /* Convert a native/host siginfo object, into/from the siginfo in the
6130 layout of the inferiors' architecture. */
6131
6132 static void
6133 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6134 {
6135 int done = 0;
6136
6137 if (the_low_target.siginfo_fixup != NULL)
6138 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6139
6140 /* If there was no callback, or the callback didn't do anything,
6141 then just do a straight memcpy. */
6142 if (!done)
6143 {
6144 if (direction == 1)
6145 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6146 else
6147 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6148 }
6149 }
6150
6151 static int
6152 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6153 unsigned const char *writebuf, CORE_ADDR offset, int len)
6154 {
6155 int pid;
6156 siginfo_t siginfo;
6157 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6158
6159 if (current_thread == NULL)
6160 return -1;
6161
6162 pid = lwpid_of (current_thread);
6163
6164 if (debug_threads)
6165 debug_printf ("%s siginfo for lwp %d.\n",
6166 readbuf != NULL ? "Reading" : "Writing",
6167 pid);
6168
6169 if (offset >= sizeof (siginfo))
6170 return -1;
6171
6172 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6173 return -1;
6174
6175 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6176 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6177 inferior with a 64-bit GDBSERVER should look the same as debugging it
6178 with a 32-bit GDBSERVER, we need to convert it. */
6179 siginfo_fixup (&siginfo, inf_siginfo, 0);
6180
6181 if (offset + len > sizeof (siginfo))
6182 len = sizeof (siginfo) - offset;
6183
6184 if (readbuf != NULL)
6185 memcpy (readbuf, inf_siginfo + offset, len);
6186 else
6187 {
6188 memcpy (inf_siginfo + offset, writebuf, len);
6189
6190 /* Convert back to ptrace layout before flushing it out. */
6191 siginfo_fixup (&siginfo, inf_siginfo, 1);
6192
6193 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6194 return -1;
6195 }
6196
6197 return len;
6198 }
6199
6200 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
6201 so we notice when children change state; as the handler for the
6202 sigsuspend in my_waitpid. */
6203
6204 static void
6205 sigchld_handler (int signo)
6206 {
6207 int old_errno = errno;
6208
6209 if (debug_threads)
6210 {
6211 do
6212 {
6213 /* fprintf is not async-signal-safe, so call write
6214 directly. */
6215 if (write (2, "sigchld_handler\n",
6216 sizeof ("sigchld_handler\n") - 1) < 0)
6217 break; /* just ignore */
6218 } while (0);
6219 }
6220
6221 if (target_is_async_p ())
6222 async_file_mark (); /* trigger a linux_wait */
6223
6224 errno = old_errno;
6225 }
6226
6227 static int
6228 linux_supports_non_stop (void)
6229 {
6230 return 1;
6231 }
6232
6233 static int
6234 linux_async (int enable)
6235 {
6236 int previous = target_is_async_p ();
6237
6238 if (debug_threads)
6239 debug_printf ("linux_async (%d), previous=%d\n",
6240 enable, previous);
6241
6242 if (previous != enable)
6243 {
6244 sigset_t mask;
6245 sigemptyset (&mask);
6246 sigaddset (&mask, SIGCHLD);
6247
6248 sigprocmask (SIG_BLOCK, &mask, NULL);
6249
6250 if (enable)
6251 {
6252 if (pipe (linux_event_pipe) == -1)
6253 {
6254 linux_event_pipe[0] = -1;
6255 linux_event_pipe[1] = -1;
6256 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6257
6258 warning ("creating event pipe failed.");
6259 return previous;
6260 }
6261
6262 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6263 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6264
6265 /* Register the event loop handler. */
6266 add_file_handler (linux_event_pipe[0],
6267 handle_target_event, NULL);
6268
6269 /* Always trigger a linux_wait. */
6270 async_file_mark ();
6271 }
6272 else
6273 {
6274 delete_file_handler (linux_event_pipe[0]);
6275
6276 close (linux_event_pipe[0]);
6277 close (linux_event_pipe[1]);
6278 linux_event_pipe[0] = -1;
6279 linux_event_pipe[1] = -1;
6280 }
6281
6282 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6283 }
6284
6285 return previous;
6286 }
6287
6288 static int
6289 linux_start_non_stop (int nonstop)
6290 {
6291 /* Register or unregister from event-loop accordingly. */
6292 linux_async (nonstop);
6293
6294 if (target_is_async_p () != (nonstop != 0))
6295 return -1;
6296
6297 return 0;
6298 }
6299
6300 static int
6301 linux_supports_multi_process (void)
6302 {
6303 return 1;
6304 }
6305
6306 /* Check if fork events are supported. */
6307
6308 static int
6309 linux_supports_fork_events (void)
6310 {
6311 return linux_supports_tracefork ();
6312 }
6313
6314 /* Check if vfork events are supported. */
6315
6316 static int
6317 linux_supports_vfork_events (void)
6318 {
6319 return linux_supports_tracefork ();
6320 }
6321
6322 /* Check if exec events are supported. */
6323
6324 static int
6325 linux_supports_exec_events (void)
6326 {
6327 return linux_supports_traceexec ();
6328 }
6329
6330 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6331 ptrace flags for all inferiors. This is in case the new GDB connection
6332 doesn't support the same set of events that the previous one did. */
6333
6334 static void
6335 linux_handle_new_gdb_connection (void)
6336 {
6337 /* Request that all the lwps reset their ptrace options. */
6338 for_each_thread ([] (thread_info *thread)
6339 {
6340 struct lwp_info *lwp = get_thread_lwp (thread);
6341
6342 if (!lwp->stopped)
6343 {
6344 /* Stop the lwp so we can modify its ptrace options. */
6345 lwp->must_set_ptrace_flags = 1;
6346 linux_stop_lwp (lwp);
6347 }
6348 else
6349 {
6350 /* Already stopped; go ahead and set the ptrace options. */
6351 struct process_info *proc = find_process_pid (pid_of (thread));
6352 int options = linux_low_ptrace_options (proc->attached);
6353
6354 linux_enable_event_reporting (lwpid_of (thread), options);
6355 lwp->must_set_ptrace_flags = 0;
6356 }
6357 });
6358 }
6359
6360 static int
6361 linux_supports_disable_randomization (void)
6362 {
6363 #ifdef HAVE_PERSONALITY
6364 return 1;
6365 #else
6366 return 0;
6367 #endif
6368 }
6369
6370 static int
6371 linux_supports_agent (void)
6372 {
6373 return 1;
6374 }
6375
6376 static int
6377 linux_supports_range_stepping (void)
6378 {
6379 if (can_software_single_step ())
6380 return 1;
6381 if (*the_low_target.supports_range_stepping == NULL)
6382 return 0;
6383
6384 return (*the_low_target.supports_range_stepping) ();
6385 }
6386
6387 /* Enumerate spufs IDs for process PID. */
6388 static int
6389 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6390 {
6391 int pos = 0;
6392 int written = 0;
6393 char path[128];
6394 DIR *dir;
6395 struct dirent *entry;
6396
6397 sprintf (path, "/proc/%ld/fd", pid);
6398 dir = opendir (path);
6399 if (!dir)
6400 return -1;
6401
6402 rewinddir (dir);
6403 while ((entry = readdir (dir)) != NULL)
6404 {
6405 struct stat st;
6406 struct statfs stfs;
6407 int fd;
6408
6409 fd = atoi (entry->d_name);
6410 if (!fd)
6411 continue;
6412
6413 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6414 if (stat (path, &st) != 0)
6415 continue;
6416 if (!S_ISDIR (st.st_mode))
6417 continue;
6418
6419 if (statfs (path, &stfs) != 0)
6420 continue;
6421 if (stfs.f_type != SPUFS_MAGIC)
6422 continue;
6423
6424 if (pos >= offset && pos + 4 <= offset + len)
6425 {
6426 *(unsigned int *)(buf + pos - offset) = fd;
6427 written += 4;
6428 }
6429 pos += 4;
6430 }
6431
6432 closedir (dir);
6433 return written;
6434 }
6435
6436 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6437 object type, using the /proc file system. */
6438 static int
6439 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6440 unsigned const char *writebuf,
6441 CORE_ADDR offset, int len)
6442 {
6443 long pid = lwpid_of (current_thread);
6444 char buf[128];
6445 int fd = 0;
6446 int ret = 0;
6447
6448 if (!writebuf && !readbuf)
6449 return -1;
6450
6451 if (!*annex)
6452 {
6453 if (!readbuf)
6454 return -1;
6455 else
6456 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6457 }
6458
6459 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6460 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6461 if (fd <= 0)
6462 return -1;
6463
6464 if (offset != 0
6465 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6466 {
6467 close (fd);
6468 return 0;
6469 }
6470
6471 if (writebuf)
6472 ret = write (fd, writebuf, (size_t) len);
6473 else
6474 ret = read (fd, readbuf, (size_t) len);
6475
6476 close (fd);
6477 return ret;
6478 }
6479
6480 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6481 struct target_loadseg
6482 {
6483 /* Core address to which the segment is mapped. */
6484 Elf32_Addr addr;
6485 /* VMA recorded in the program header. */
6486 Elf32_Addr p_vaddr;
6487 /* Size of this segment in memory. */
6488 Elf32_Word p_memsz;
6489 };
6490
6491 # if defined PT_GETDSBT
6492 struct target_loadmap
6493 {
6494 /* Protocol version number, must be zero. */
6495 Elf32_Word version;
6496 /* Pointer to the DSBT table, its size, and the DSBT index. */
6497 unsigned *dsbt_table;
6498 unsigned dsbt_size, dsbt_index;
6499 /* Number of segments in this map. */
6500 Elf32_Word nsegs;
6501 /* The actual memory map. */
6502 struct target_loadseg segs[/*nsegs*/];
6503 };
6504 # define LINUX_LOADMAP PT_GETDSBT
6505 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6506 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6507 # else
6508 struct target_loadmap
6509 {
6510 /* Protocol version number, must be zero. */
6511 Elf32_Half version;
6512 /* Number of segments in this map. */
6513 Elf32_Half nsegs;
6514 /* The actual memory map. */
6515 struct target_loadseg segs[/*nsegs*/];
6516 };
6517 # define LINUX_LOADMAP PTRACE_GETFDPIC
6518 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6519 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6520 # endif
6521
6522 static int
6523 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6524 unsigned char *myaddr, unsigned int len)
6525 {
6526 int pid = lwpid_of (current_thread);
6527 int addr = -1;
6528 struct target_loadmap *data = NULL;
6529 unsigned int actual_length, copy_length;
6530
6531 if (strcmp (annex, "exec") == 0)
6532 addr = (int) LINUX_LOADMAP_EXEC;
6533 else if (strcmp (annex, "interp") == 0)
6534 addr = (int) LINUX_LOADMAP_INTERP;
6535 else
6536 return -1;
6537
6538 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6539 return -1;
6540
6541 if (data == NULL)
6542 return -1;
6543
6544 actual_length = sizeof (struct target_loadmap)
6545 + sizeof (struct target_loadseg) * data->nsegs;
6546
6547 if (offset < 0 || offset > actual_length)
6548 return -1;
6549
6550 copy_length = actual_length - offset < len ? actual_length - offset : len;
6551 memcpy (myaddr, (char *) data + offset, copy_length);
6552 return copy_length;
6553 }
6554 #else
6555 # define linux_read_loadmap NULL
6556 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6557
6558 static void
6559 linux_process_qsupported (char **features, int count)
6560 {
6561 if (the_low_target.process_qsupported != NULL)
6562 the_low_target.process_qsupported (features, count);
6563 }
6564
6565 static int
6566 linux_supports_catch_syscall (void)
6567 {
6568 return (the_low_target.get_syscall_trapinfo != NULL
6569 && linux_supports_tracesysgood ());
6570 }
6571
6572 static int
6573 linux_get_ipa_tdesc_idx (void)
6574 {
6575 if (the_low_target.get_ipa_tdesc_idx == NULL)
6576 return 0;
6577
6578 return (*the_low_target.get_ipa_tdesc_idx) ();
6579 }
6580
6581 static int
6582 linux_supports_tracepoints (void)
6583 {
6584 if (*the_low_target.supports_tracepoints == NULL)
6585 return 0;
6586
6587 return (*the_low_target.supports_tracepoints) ();
6588 }
6589
6590 static CORE_ADDR
6591 linux_read_pc (struct regcache *regcache)
6592 {
6593 if (the_low_target.get_pc == NULL)
6594 return 0;
6595
6596 return (*the_low_target.get_pc) (regcache);
6597 }
6598
6599 static void
6600 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6601 {
6602 gdb_assert (the_low_target.set_pc != NULL);
6603
6604 (*the_low_target.set_pc) (regcache, pc);
6605 }
6606
6607 static int
6608 linux_thread_stopped (struct thread_info *thread)
6609 {
6610 return get_thread_lwp (thread)->stopped;
6611 }
6612
6613 /* This exposes stop-all-threads functionality to other modules. */
6614
6615 static void
6616 linux_pause_all (int freeze)
6617 {
6618 stop_all_lwps (freeze, NULL);
6619 }
6620
6621 /* This exposes unstop-all-threads functionality to other gdbserver
6622 modules. */
6623
6624 static void
6625 linux_unpause_all (int unfreeze)
6626 {
6627 unstop_all_lwps (unfreeze, NULL);
6628 }
6629
6630 static int
6631 linux_prepare_to_access_memory (void)
6632 {
6633 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6634 running LWP. */
6635 if (non_stop)
6636 linux_pause_all (1);
6637 return 0;
6638 }
6639
6640 static void
6641 linux_done_accessing_memory (void)
6642 {
6643 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6644 running LWP. */
6645 if (non_stop)
6646 linux_unpause_all (1);
6647 }
6648
6649 static int
6650 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6651 CORE_ADDR collector,
6652 CORE_ADDR lockaddr,
6653 ULONGEST orig_size,
6654 CORE_ADDR *jump_entry,
6655 CORE_ADDR *trampoline,
6656 ULONGEST *trampoline_size,
6657 unsigned char *jjump_pad_insn,
6658 ULONGEST *jjump_pad_insn_size,
6659 CORE_ADDR *adjusted_insn_addr,
6660 CORE_ADDR *adjusted_insn_addr_end,
6661 char *err)
6662 {
6663 return (*the_low_target.install_fast_tracepoint_jump_pad)
6664 (tpoint, tpaddr, collector, lockaddr, orig_size,
6665 jump_entry, trampoline, trampoline_size,
6666 jjump_pad_insn, jjump_pad_insn_size,
6667 adjusted_insn_addr, adjusted_insn_addr_end,
6668 err);
6669 }
6670
6671 static struct emit_ops *
6672 linux_emit_ops (void)
6673 {
6674 if (the_low_target.emit_ops != NULL)
6675 return (*the_low_target.emit_ops) ();
6676 else
6677 return NULL;
6678 }
6679
6680 static int
6681 linux_get_min_fast_tracepoint_insn_len (void)
6682 {
6683 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6684 }
6685
6686 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6687
6688 static int
6689 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6690 CORE_ADDR *phdr_memaddr, int *num_phdr)
6691 {
6692 char filename[PATH_MAX];
6693 int fd;
6694 const int auxv_size = is_elf64
6695 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6696 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6697
6698 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6699
6700 fd = open (filename, O_RDONLY);
6701 if (fd < 0)
6702 return 1;
6703
6704 *phdr_memaddr = 0;
6705 *num_phdr = 0;
6706 while (read (fd, buf, auxv_size) == auxv_size
6707 && (*phdr_memaddr == 0 || *num_phdr == 0))
6708 {
6709 if (is_elf64)
6710 {
6711 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6712
6713 switch (aux->a_type)
6714 {
6715 case AT_PHDR:
6716 *phdr_memaddr = aux->a_un.a_val;
6717 break;
6718 case AT_PHNUM:
6719 *num_phdr = aux->a_un.a_val;
6720 break;
6721 }
6722 }
6723 else
6724 {
6725 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6726
6727 switch (aux->a_type)
6728 {
6729 case AT_PHDR:
6730 *phdr_memaddr = aux->a_un.a_val;
6731 break;
6732 case AT_PHNUM:
6733 *num_phdr = aux->a_un.a_val;
6734 break;
6735 }
6736 }
6737 }
6738
6739 close (fd);
6740
6741 if (*phdr_memaddr == 0 || *num_phdr == 0)
6742 {
6743 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6744 "phdr_memaddr = %ld, phdr_num = %d",
6745 (long) *phdr_memaddr, *num_phdr);
6746 return 2;
6747 }
6748
6749 return 0;
6750 }
6751
6752 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6753
6754 static CORE_ADDR
6755 get_dynamic (const int pid, const int is_elf64)
6756 {
6757 CORE_ADDR phdr_memaddr, relocation;
6758 int num_phdr, i;
6759 unsigned char *phdr_buf;
6760 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6761
6762 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6763 return 0;
6764
6765 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6766 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6767
6768 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6769 return 0;
6770
6771 /* Compute relocation: it is expected to be 0 for "regular" executables,
6772 non-zero for PIE ones. */
6773 relocation = -1;
6774 for (i = 0; relocation == -1 && i < num_phdr; i++)
6775 if (is_elf64)
6776 {
6777 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6778
6779 if (p->p_type == PT_PHDR)
6780 relocation = phdr_memaddr - p->p_vaddr;
6781 }
6782 else
6783 {
6784 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6785
6786 if (p->p_type == PT_PHDR)
6787 relocation = phdr_memaddr - p->p_vaddr;
6788 }
6789
6790 if (relocation == -1)
6791 {
6792 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6793 any real world executables, including PIE executables, have always
6794 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6795 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6796 or present DT_DEBUG anyway (fpc binaries are statically linked).
6797
6798 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6799
6800 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6801
6802 return 0;
6803 }
6804
6805 for (i = 0; i < num_phdr; i++)
6806 {
6807 if (is_elf64)
6808 {
6809 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6810
6811 if (p->p_type == PT_DYNAMIC)
6812 return p->p_vaddr + relocation;
6813 }
6814 else
6815 {
6816 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6817
6818 if (p->p_type == PT_DYNAMIC)
6819 return p->p_vaddr + relocation;
6820 }
6821 }
6822
6823 return 0;
6824 }
6825
6826 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6827 can be 0 if the inferior does not yet have the library list initialized.
6828 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6829 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6830
6831 static CORE_ADDR
6832 get_r_debug (const int pid, const int is_elf64)
6833 {
6834 CORE_ADDR dynamic_memaddr;
6835 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6836 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6837 CORE_ADDR map = -1;
6838
6839 dynamic_memaddr = get_dynamic (pid, is_elf64);
6840 if (dynamic_memaddr == 0)
6841 return map;
6842
6843 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6844 {
6845 if (is_elf64)
6846 {
6847 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6848 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6849 union
6850 {
6851 Elf64_Xword map;
6852 unsigned char buf[sizeof (Elf64_Xword)];
6853 }
6854 rld_map;
6855 #endif
6856 #ifdef DT_MIPS_RLD_MAP
6857 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6858 {
6859 if (linux_read_memory (dyn->d_un.d_val,
6860 rld_map.buf, sizeof (rld_map.buf)) == 0)
6861 return rld_map.map;
6862 else
6863 break;
6864 }
6865 #endif /* DT_MIPS_RLD_MAP */
6866 #ifdef DT_MIPS_RLD_MAP_REL
6867 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6868 {
6869 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6870 rld_map.buf, sizeof (rld_map.buf)) == 0)
6871 return rld_map.map;
6872 else
6873 break;
6874 }
6875 #endif /* DT_MIPS_RLD_MAP_REL */
6876
6877 if (dyn->d_tag == DT_DEBUG && map == -1)
6878 map = dyn->d_un.d_val;
6879
6880 if (dyn->d_tag == DT_NULL)
6881 break;
6882 }
6883 else
6884 {
6885 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6886 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6887 union
6888 {
6889 Elf32_Word map;
6890 unsigned char buf[sizeof (Elf32_Word)];
6891 }
6892 rld_map;
6893 #endif
6894 #ifdef DT_MIPS_RLD_MAP
6895 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6896 {
6897 if (linux_read_memory (dyn->d_un.d_val,
6898 rld_map.buf, sizeof (rld_map.buf)) == 0)
6899 return rld_map.map;
6900 else
6901 break;
6902 }
6903 #endif /* DT_MIPS_RLD_MAP */
6904 #ifdef DT_MIPS_RLD_MAP_REL
6905 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6906 {
6907 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6908 rld_map.buf, sizeof (rld_map.buf)) == 0)
6909 return rld_map.map;
6910 else
6911 break;
6912 }
6913 #endif /* DT_MIPS_RLD_MAP_REL */
6914
6915 if (dyn->d_tag == DT_DEBUG && map == -1)
6916 map = dyn->d_un.d_val;
6917
6918 if (dyn->d_tag == DT_NULL)
6919 break;
6920 }
6921
6922 dynamic_memaddr += dyn_size;
6923 }
6924
6925 return map;
6926 }
6927
6928 /* Read one pointer from MEMADDR in the inferior. */
6929
6930 static int
6931 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6932 {
6933 int ret;
6934
6935 /* Go through a union so this works on either big or little endian
6936 hosts, when the inferior's pointer size is smaller than the size
6937 of CORE_ADDR. It is assumed the inferior's endianness is the
6938 same of the superior's. */
6939 union
6940 {
6941 CORE_ADDR core_addr;
6942 unsigned int ui;
6943 unsigned char uc;
6944 } addr;
6945
6946 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6947 if (ret == 0)
6948 {
6949 if (ptr_size == sizeof (CORE_ADDR))
6950 *ptr = addr.core_addr;
6951 else if (ptr_size == sizeof (unsigned int))
6952 *ptr = addr.ui;
6953 else
6954 gdb_assert_not_reached ("unhandled pointer size");
6955 }
6956 return ret;
6957 }
6958
6959 struct link_map_offsets
6960 {
6961 /* Offset and size of r_debug.r_version. */
6962 int r_version_offset;
6963
6964 /* Offset and size of r_debug.r_map. */
6965 int r_map_offset;
6966
6967 /* Offset to l_addr field in struct link_map. */
6968 int l_addr_offset;
6969
6970 /* Offset to l_name field in struct link_map. */
6971 int l_name_offset;
6972
6973 /* Offset to l_ld field in struct link_map. */
6974 int l_ld_offset;
6975
6976 /* Offset to l_next field in struct link_map. */
6977 int l_next_offset;
6978
6979 /* Offset to l_prev field in struct link_map. */
6980 int l_prev_offset;
6981 };
6982
6983 /* Construct qXfer:libraries-svr4:read reply. */
6984
6985 static int
6986 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6987 unsigned const char *writebuf,
6988 CORE_ADDR offset, int len)
6989 {
6990 struct process_info_private *const priv = current_process ()->priv;
6991 char filename[PATH_MAX];
6992 int pid, is_elf64;
6993
6994 static const struct link_map_offsets lmo_32bit_offsets =
6995 {
6996 0, /* r_version offset. */
6997 4, /* r_debug.r_map offset. */
6998 0, /* l_addr offset in link_map. */
6999 4, /* l_name offset in link_map. */
7000 8, /* l_ld offset in link_map. */
7001 12, /* l_next offset in link_map. */
7002 16 /* l_prev offset in link_map. */
7003 };
7004
7005 static const struct link_map_offsets lmo_64bit_offsets =
7006 {
7007 0, /* r_version offset. */
7008 8, /* r_debug.r_map offset. */
7009 0, /* l_addr offset in link_map. */
7010 8, /* l_name offset in link_map. */
7011 16, /* l_ld offset in link_map. */
7012 24, /* l_next offset in link_map. */
7013 32 /* l_prev offset in link_map. */
7014 };
7015 const struct link_map_offsets *lmo;
7016 unsigned int machine;
7017 int ptr_size;
7018 CORE_ADDR lm_addr = 0, lm_prev = 0;
7019 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7020 int header_done = 0;
7021
7022 if (writebuf != NULL)
7023 return -2;
7024 if (readbuf == NULL)
7025 return -1;
7026
7027 pid = lwpid_of (current_thread);
7028 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7029 is_elf64 = elf_64_file_p (filename, &machine);
7030 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7031 ptr_size = is_elf64 ? 8 : 4;
7032
7033 while (annex[0] != '\0')
7034 {
7035 const char *sep;
7036 CORE_ADDR *addrp;
7037 int name_len;
7038
7039 sep = strchr (annex, '=');
7040 if (sep == NULL)
7041 break;
7042
7043 name_len = sep - annex;
7044 if (name_len == 5 && startswith (annex, "start"))
7045 addrp = &lm_addr;
7046 else if (name_len == 4 && startswith (annex, "prev"))
7047 addrp = &lm_prev;
7048 else
7049 {
7050 annex = strchr (sep, ';');
7051 if (annex == NULL)
7052 break;
7053 annex++;
7054 continue;
7055 }
7056
7057 annex = decode_address_to_semicolon (addrp, sep + 1);
7058 }
7059
7060 if (lm_addr == 0)
7061 {
7062 int r_version = 0;
7063
7064 if (priv->r_debug == 0)
7065 priv->r_debug = get_r_debug (pid, is_elf64);
7066
7067 /* We failed to find DT_DEBUG. Such situation will not change
7068 for this inferior - do not retry it. Report it to GDB as
7069 E01, see for the reasons at the GDB solib-svr4.c side. */
7070 if (priv->r_debug == (CORE_ADDR) -1)
7071 return -1;
7072
7073 if (priv->r_debug != 0)
7074 {
7075 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7076 (unsigned char *) &r_version,
7077 sizeof (r_version)) != 0
7078 || r_version != 1)
7079 {
7080 warning ("unexpected r_debug version %d", r_version);
7081 }
7082 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7083 &lm_addr, ptr_size) != 0)
7084 {
7085 warning ("unable to read r_map from 0x%lx",
7086 (long) priv->r_debug + lmo->r_map_offset);
7087 }
7088 }
7089 }
7090
7091 std::string document = "<library-list-svr4 version=\"1.0\"";
7092
7093 while (lm_addr
7094 && read_one_ptr (lm_addr + lmo->l_name_offset,
7095 &l_name, ptr_size) == 0
7096 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7097 &l_addr, ptr_size) == 0
7098 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7099 &l_ld, ptr_size) == 0
7100 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7101 &l_prev, ptr_size) == 0
7102 && read_one_ptr (lm_addr + lmo->l_next_offset,
7103 &l_next, ptr_size) == 0)
7104 {
7105 unsigned char libname[PATH_MAX];
7106
7107 if (lm_prev != l_prev)
7108 {
7109 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7110 (long) lm_prev, (long) l_prev);
7111 break;
7112 }
7113
7114 /* Ignore the first entry even if it has valid name as the first entry
7115 corresponds to the main executable. The first entry should not be
7116 skipped if the dynamic loader was loaded late by a static executable
7117 (see solib-svr4.c parameter ignore_first). But in such case the main
7118 executable does not have PT_DYNAMIC present and this function already
7119 exited above due to failed get_r_debug. */
7120 if (lm_prev == 0)
7121 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7122 else
7123 {
7124 /* Not checking for error because reading may stop before
7125 we've got PATH_MAX worth of characters. */
7126 libname[0] = '\0';
7127 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7128 libname[sizeof (libname) - 1] = '\0';
7129 if (libname[0] != '\0')
7130 {
7131 if (!header_done)
7132 {
7133 /* Terminate `<library-list-svr4'. */
7134 document += '>';
7135 header_done = 1;
7136 }
7137
7138 string_appendf (document, "<library name=\"");
7139 xml_escape_text_append (&document, (char *) libname);
7140 string_appendf (document, "\" lm=\"0x%lx\" "
7141 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7142 (unsigned long) lm_addr, (unsigned long) l_addr,
7143 (unsigned long) l_ld);
7144 }
7145 }
7146
7147 lm_prev = lm_addr;
7148 lm_addr = l_next;
7149 }
7150
7151 if (!header_done)
7152 {
7153 /* Empty list; terminate `<library-list-svr4'. */
7154 document += "/>";
7155 }
7156 else
7157 document += "</library-list-svr4>";
7158
7159 int document_len = document.length ();
7160 if (offset < document_len)
7161 document_len -= offset;
7162 else
7163 document_len = 0;
7164 if (len > document_len)
7165 len = document_len;
7166
7167 memcpy (readbuf, document.data () + offset, len);
7168
7169 return len;
7170 }
7171
7172 #ifdef HAVE_LINUX_BTRACE
7173
7174 /* See to_disable_btrace target method. */
7175
7176 static int
7177 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7178 {
7179 enum btrace_error err;
7180
7181 err = linux_disable_btrace (tinfo);
7182 return (err == BTRACE_ERR_NONE ? 0 : -1);
7183 }
7184
7185 /* Encode an Intel Processor Trace configuration. */
7186
7187 static void
7188 linux_low_encode_pt_config (struct buffer *buffer,
7189 const struct btrace_data_pt_config *config)
7190 {
7191 buffer_grow_str (buffer, "<pt-config>\n");
7192
7193 switch (config->cpu.vendor)
7194 {
7195 case CV_INTEL:
7196 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7197 "model=\"%u\" stepping=\"%u\"/>\n",
7198 config->cpu.family, config->cpu.model,
7199 config->cpu.stepping);
7200 break;
7201
7202 default:
7203 break;
7204 }
7205
7206 buffer_grow_str (buffer, "</pt-config>\n");
7207 }
7208
7209 /* Encode a raw buffer. */
7210
7211 static void
7212 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7213 unsigned int size)
7214 {
7215 if (size == 0)
7216 return;
7217
7218 /* We use hex encoding - see common/rsp-low.h. */
7219 buffer_grow_str (buffer, "<raw>\n");
7220
7221 while (size-- > 0)
7222 {
7223 char elem[2];
7224
7225 elem[0] = tohex ((*data >> 4) & 0xf);
7226 elem[1] = tohex (*data++ & 0xf);
7227
7228 buffer_grow (buffer, elem, 2);
7229 }
7230
7231 buffer_grow_str (buffer, "</raw>\n");
7232 }
7233
7234 /* See to_read_btrace target method. */
7235
7236 static int
7237 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7238 enum btrace_read_type type)
7239 {
7240 struct btrace_data btrace;
7241 struct btrace_block *block;
7242 enum btrace_error err;
7243 int i;
7244
7245 err = linux_read_btrace (&btrace, tinfo, type);
7246 if (err != BTRACE_ERR_NONE)
7247 {
7248 if (err == BTRACE_ERR_OVERFLOW)
7249 buffer_grow_str0 (buffer, "E.Overflow.");
7250 else
7251 buffer_grow_str0 (buffer, "E.Generic Error.");
7252
7253 return -1;
7254 }
7255
7256 switch (btrace.format)
7257 {
7258 case BTRACE_FORMAT_NONE:
7259 buffer_grow_str0 (buffer, "E.No Trace.");
7260 return -1;
7261
7262 case BTRACE_FORMAT_BTS:
7263 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7264 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7265
7266 for (i = 0;
7267 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7268 i++)
7269 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7270 paddress (block->begin), paddress (block->end));
7271
7272 buffer_grow_str0 (buffer, "</btrace>\n");
7273 break;
7274
7275 case BTRACE_FORMAT_PT:
7276 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7277 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7278 buffer_grow_str (buffer, "<pt>\n");
7279
7280 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7281
7282 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7283 btrace.variant.pt.size);
7284
7285 buffer_grow_str (buffer, "</pt>\n");
7286 buffer_grow_str0 (buffer, "</btrace>\n");
7287 break;
7288
7289 default:
7290 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7291 return -1;
7292 }
7293
7294 return 0;
7295 }
7296
7297 /* See to_btrace_conf target method. */
7298
7299 static int
7300 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7301 struct buffer *buffer)
7302 {
7303 const struct btrace_config *conf;
7304
7305 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7306 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7307
7308 conf = linux_btrace_conf (tinfo);
7309 if (conf != NULL)
7310 {
7311 switch (conf->format)
7312 {
7313 case BTRACE_FORMAT_NONE:
7314 break;
7315
7316 case BTRACE_FORMAT_BTS:
7317 buffer_xml_printf (buffer, "<bts");
7318 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7319 buffer_xml_printf (buffer, " />\n");
7320 break;
7321
7322 case BTRACE_FORMAT_PT:
7323 buffer_xml_printf (buffer, "<pt");
7324 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7325 buffer_xml_printf (buffer, "/>\n");
7326 break;
7327 }
7328 }
7329
7330 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7331 return 0;
7332 }
7333 #endif /* HAVE_LINUX_BTRACE */
7334
7335 /* See nat/linux-nat.h. */
7336
7337 ptid_t
7338 current_lwp_ptid (void)
7339 {
7340 return ptid_of (current_thread);
7341 }
7342
7343 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7344
7345 static int
7346 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7347 {
7348 if (the_low_target.breakpoint_kind_from_pc != NULL)
7349 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7350 else
7351 return default_breakpoint_kind_from_pc (pcptr);
7352 }
7353
7354 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7355
7356 static const gdb_byte *
7357 linux_sw_breakpoint_from_kind (int kind, int *size)
7358 {
7359 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7360
7361 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7362 }
7363
7364 /* Implementation of the target_ops method
7365 "breakpoint_kind_from_current_state". */
7366
7367 static int
7368 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7369 {
7370 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7371 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7372 else
7373 return linux_breakpoint_kind_from_pc (pcptr);
7374 }
7375
7376 /* Default implementation of linux_target_ops method "set_pc" for
7377 32-bit pc register which is literally named "pc". */
7378
7379 void
7380 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7381 {
7382 uint32_t newpc = pc;
7383
7384 supply_register_by_name (regcache, "pc", &newpc);
7385 }
7386
7387 /* Default implementation of linux_target_ops method "get_pc" for
7388 32-bit pc register which is literally named "pc". */
7389
7390 CORE_ADDR
7391 linux_get_pc_32bit (struct regcache *regcache)
7392 {
7393 uint32_t pc;
7394
7395 collect_register_by_name (regcache, "pc", &pc);
7396 if (debug_threads)
7397 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7398 return pc;
7399 }
7400
7401 /* Default implementation of linux_target_ops method "set_pc" for
7402 64-bit pc register which is literally named "pc". */
7403
7404 void
7405 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7406 {
7407 uint64_t newpc = pc;
7408
7409 supply_register_by_name (regcache, "pc", &newpc);
7410 }
7411
7412 /* Default implementation of linux_target_ops method "get_pc" for
7413 64-bit pc register which is literally named "pc". */
7414
7415 CORE_ADDR
7416 linux_get_pc_64bit (struct regcache *regcache)
7417 {
7418 uint64_t pc;
7419
7420 collect_register_by_name (regcache, "pc", &pc);
7421 if (debug_threads)
7422 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7423 return pc;
7424 }
7425
7426
7427 static struct target_ops linux_target_ops = {
7428 linux_create_inferior,
7429 linux_post_create_inferior,
7430 linux_attach,
7431 linux_kill,
7432 linux_detach,
7433 linux_mourn,
7434 linux_join,
7435 linux_thread_alive,
7436 linux_resume,
7437 linux_wait,
7438 linux_fetch_registers,
7439 linux_store_registers,
7440 linux_prepare_to_access_memory,
7441 linux_done_accessing_memory,
7442 linux_read_memory,
7443 linux_write_memory,
7444 linux_look_up_symbols,
7445 linux_request_interrupt,
7446 linux_read_auxv,
7447 linux_supports_z_point_type,
7448 linux_insert_point,
7449 linux_remove_point,
7450 linux_stopped_by_sw_breakpoint,
7451 linux_supports_stopped_by_sw_breakpoint,
7452 linux_stopped_by_hw_breakpoint,
7453 linux_supports_stopped_by_hw_breakpoint,
7454 linux_supports_hardware_single_step,
7455 linux_stopped_by_watchpoint,
7456 linux_stopped_data_address,
7457 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7458 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7459 && defined(PT_TEXT_END_ADDR)
7460 linux_read_offsets,
7461 #else
7462 NULL,
7463 #endif
7464 #ifdef USE_THREAD_DB
7465 thread_db_get_tls_address,
7466 #else
7467 NULL,
7468 #endif
7469 linux_qxfer_spu,
7470 hostio_last_error_from_errno,
7471 linux_qxfer_osdata,
7472 linux_xfer_siginfo,
7473 linux_supports_non_stop,
7474 linux_async,
7475 linux_start_non_stop,
7476 linux_supports_multi_process,
7477 linux_supports_fork_events,
7478 linux_supports_vfork_events,
7479 linux_supports_exec_events,
7480 linux_handle_new_gdb_connection,
7481 #ifdef USE_THREAD_DB
7482 thread_db_handle_monitor_command,
7483 #else
7484 NULL,
7485 #endif
7486 linux_common_core_of_thread,
7487 linux_read_loadmap,
7488 linux_process_qsupported,
7489 linux_supports_tracepoints,
7490 linux_read_pc,
7491 linux_write_pc,
7492 linux_thread_stopped,
7493 NULL,
7494 linux_pause_all,
7495 linux_unpause_all,
7496 linux_stabilize_threads,
7497 linux_install_fast_tracepoint_jump_pad,
7498 linux_emit_ops,
7499 linux_supports_disable_randomization,
7500 linux_get_min_fast_tracepoint_insn_len,
7501 linux_qxfer_libraries_svr4,
7502 linux_supports_agent,
7503 #ifdef HAVE_LINUX_BTRACE
7504 linux_enable_btrace,
7505 linux_low_disable_btrace,
7506 linux_low_read_btrace,
7507 linux_low_btrace_conf,
7508 #else
7509 NULL,
7510 NULL,
7511 NULL,
7512 NULL,
7513 #endif
7514 linux_supports_range_stepping,
7515 linux_proc_pid_to_exec_file,
7516 linux_mntns_open_cloexec,
7517 linux_mntns_unlink,
7518 linux_mntns_readlink,
7519 linux_breakpoint_kind_from_pc,
7520 linux_sw_breakpoint_from_kind,
7521 linux_proc_tid_get_name,
7522 linux_breakpoint_kind_from_current_state,
7523 linux_supports_software_single_step,
7524 linux_supports_catch_syscall,
7525 linux_get_ipa_tdesc_idx,
7526 #if USE_THREAD_DB
7527 thread_db_thread_handle,
7528 #else
7529 NULL,
7530 #endif
7531 };
7532
7533 #ifdef HAVE_LINUX_REGSETS
7534 void
7535 initialize_regsets_info (struct regsets_info *info)
7536 {
7537 for (info->num_regsets = 0;
7538 info->regsets[info->num_regsets].size >= 0;
7539 info->num_regsets++)
7540 ;
7541 }
7542 #endif
7543
7544 void
7545 initialize_low (void)
7546 {
7547 struct sigaction sigchld_action;
7548
7549 memset (&sigchld_action, 0, sizeof (sigchld_action));
7550 set_target_ops (&linux_target_ops);
7551
7552 linux_ptrace_init_warnings ();
7553 linux_proc_init_warnings ();
7554
7555 sigchld_action.sa_handler = sigchld_handler;
7556 sigemptyset (&sigchld_action.sa_mask);
7557 sigchld_action.sa_flags = SA_RESTART;
7558 sigaction (SIGCHLD, &sigchld_action, NULL);
7559
7560 initialize_low_arch ();
7561
7562 linux_check_ptrace_features ();
7563 }
This page took 0.197645 seconds and 4 git commands to generate.